Diffstat (limited to 'drivers/infiniband/core')
 drivers/infiniband/core/Makefile     |    4
 drivers/infiniband/core/addr.c       |   22
 drivers/infiniband/core/cache.c      |    5
 drivers/infiniband/core/cm.c         |   66
 drivers/infiniband/core/cma.c        |  403
 drivers/infiniband/core/device.c     |    6
 drivers/infiniband/core/iwcm.c       | 1019
 drivers/infiniband/core/iwcm.h       |   62
 drivers/infiniband/core/mad.c        |   19
 drivers/infiniband/core/mad_priv.h   |    1
 drivers/infiniband/core/mad_rmpp.c   |   94
 drivers/infiniband/core/sa_query.c   |   67
 drivers/infiniband/core/smi.c        |   16
 drivers/infiniband/core/sysfs.c      |   13
 drivers/infiniband/core/ucm.c        |    9
 drivers/infiniband/core/user_mad.c   |    7
 drivers/infiniband/core/uverbs_cmd.c |   64
 drivers/infiniband/core/verbs.c      |   21
18 files changed, 1731 insertions, 167 deletions
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 68e73ec2d1f8..163d991eb8c9 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o | 1 | infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o |
2 | 2 | ||
3 | obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ | 3 | obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ |
4 | ib_cm.o $(infiniband-y) | 4 | ib_cm.o iw_cm.o $(infiniband-y) |
5 | obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o | 5 | obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o |
6 | obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o | 6 | obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o |
7 | 7 | ||
@@ -14,6 +14,8 @@ ib_sa-y := sa_query.o | |||
14 | 14 | ||
15 | ib_cm-y := cm.o | 15 | ib_cm-y := cm.o |
16 | 16 | ||
17 | iw_cm-y := iwcm.o | ||
18 | |||
17 | rdma_cm-y := cma.o | 19 | rdma_cm-y := cma.o |
18 | 20 | ||
19 | ib_addr-y := addr.o | 21 | ib_addr-y := addr.o |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 1205e8027829..9cbf09e2052f 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -61,12 +61,15 @@ static LIST_HEAD(req_list); | |||
61 | static DECLARE_WORK(work, process_req, NULL); | 61 | static DECLARE_WORK(work, process_req, NULL); |
62 | static struct workqueue_struct *addr_wq; | 62 | static struct workqueue_struct *addr_wq; |
63 | 63 | ||
64 | static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, | 64 | int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, |
65 | unsigned char *dst_dev_addr) | 65 | const unsigned char *dst_dev_addr) |
66 | { | 66 | { |
67 | switch (dev->type) { | 67 | switch (dev->type) { |
68 | case ARPHRD_INFINIBAND: | 68 | case ARPHRD_INFINIBAND: |
69 | dev_addr->dev_type = IB_NODE_CA; | 69 | dev_addr->dev_type = RDMA_NODE_IB_CA; |
70 | break; | ||
71 | case ARPHRD_ETHER: | ||
72 | dev_addr->dev_type = RDMA_NODE_RNIC; | ||
70 | break; | 73 | break; |
71 | default: | 74 | default: |
72 | return -EADDRNOTAVAIL; | 75 | return -EADDRNOTAVAIL; |
@@ -78,6 +81,7 @@ static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, | |||
78 | memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); | 81 | memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); |
79 | return 0; | 82 | return 0; |
80 | } | 83 | } |
84 | EXPORT_SYMBOL(rdma_copy_addr); | ||
81 | 85 | ||
82 | int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) | 86 | int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) |
83 | { | 87 | { |
@@ -89,7 +93,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) | |||
89 | if (!dev) | 93 | if (!dev) |
90 | return -EADDRNOTAVAIL; | 94 | return -EADDRNOTAVAIL; |
91 | 95 | ||
92 | ret = copy_addr(dev_addr, dev, NULL); | 96 | ret = rdma_copy_addr(dev_addr, dev, NULL); |
93 | dev_put(dev); | 97 | dev_put(dev); |
94 | return ret; | 98 | return ret; |
95 | } | 99 | } |
@@ -161,7 +165,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in, | |||
161 | 165 | ||
162 | /* If the device does ARP internally, return 'done' */ | 166 | /* If the device does ARP internally, return 'done' */ |
163 | if (rt->idev->dev->flags & IFF_NOARP) { | 167 | if (rt->idev->dev->flags & IFF_NOARP) { |
164 | copy_addr(addr, rt->idev->dev, NULL); | 168 | rdma_copy_addr(addr, rt->idev->dev, NULL); |
165 | goto put; | 169 | goto put; |
166 | } | 170 | } |
167 | 171 | ||
@@ -181,7 +185,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in, | |||
181 | src_in->sin_addr.s_addr = rt->rt_src; | 185 | src_in->sin_addr.s_addr = rt->rt_src; |
182 | } | 186 | } |
183 | 187 | ||
184 | ret = copy_addr(addr, neigh->dev, neigh->ha); | 188 | ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); |
185 | release: | 189 | release: |
186 | neigh_release(neigh); | 190 | neigh_release(neigh); |
187 | put: | 191 | put: |
@@ -245,7 +249,7 @@ static int addr_resolve_local(struct sockaddr_in *src_in, | |||
245 | if (ZERONET(src_ip)) { | 249 | if (ZERONET(src_ip)) { |
246 | src_in->sin_family = dst_in->sin_family; | 250 | src_in->sin_family = dst_in->sin_family; |
247 | src_in->sin_addr.s_addr = dst_ip; | 251 | src_in->sin_addr.s_addr = dst_ip; |
248 | ret = copy_addr(addr, dev, dev->dev_addr); | 252 | ret = rdma_copy_addr(addr, dev, dev->dev_addr); |
249 | } else if (LOOPBACK(src_ip)) { | 253 | } else if (LOOPBACK(src_ip)) { |
250 | ret = rdma_translate_ip((struct sockaddr *)dst_in, addr); | 254 | ret = rdma_translate_ip((struct sockaddr *)dst_in, addr); |
251 | if (!ret) | 255 | if (!ret) |
@@ -327,10 +331,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) | |||
327 | } | 331 | } |
328 | EXPORT_SYMBOL(rdma_addr_cancel); | 332 | EXPORT_SYMBOL(rdma_addr_cancel); |
329 | 333 | ||
330 | static int netevent_callback(struct notifier_block *self, unsigned long event, | 334 | static int netevent_callback(struct notifier_block *self, unsigned long event, |
331 | void *ctx) | 335 | void *ctx) |
332 | { | 336 | { |
333 | if (event == NETEVENT_NEIGH_UPDATE) { | 337 | if (event == NETEVENT_NEIGH_UPDATE) { |
334 | struct neighbour *neigh = ctx; | 338 | struct neighbour *neigh = ctx; |
335 | 339 | ||
336 | if (neigh->dev->type == ARPHRD_INFINIBAND && | 340 | if (neigh->dev->type == ARPHRD_INFINIBAND && |
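For context: the rename and EXPORT_SYMBOL above turn the old static copy_addr() helper into rdma_copy_addr(), so other modules can fill in an rdma_dev_addr directly. The iWARP connect-request path added to cma.c later in this patch uses it exactly that way; a condensed sketch of that call pattern (error handling trimmed, nothing here is a new API):

    struct net_device *dev;
    int ret;

    /* Map the incoming connection's local IP to the owning netdevice. */
    dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
    if (!dev)
            return -EADDRNOTAVAIL;

    /* Record the device type (RDMA_NODE_RNIC for Ethernet) and addresses. */
    ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
    dev_put(dev);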
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 75313ade2e0d..20e9f64e67a6 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
@@ -62,12 +62,13 @@ struct ib_update_work { | |||
62 | 62 | ||
63 | static inline int start_port(struct ib_device *device) | 63 | static inline int start_port(struct ib_device *device) |
64 | { | 64 | { |
65 | return device->node_type == IB_NODE_SWITCH ? 0 : 1; | 65 | return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline int end_port(struct ib_device *device) | 68 | static inline int end_port(struct ib_device *device) |
69 | { | 69 | { |
70 | return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt; | 70 | return (device->node_type == RDMA_NODE_IB_SWITCH) ? |
71 | 0 : device->phys_port_cnt; | ||
71 | } | 72 | } |
72 | 73 | ||
73 | int ib_get_cached_gid(struct ib_device *device, | 74 | int ib_get_cached_gid(struct ib_device *device, |
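The start_port()/end_port() tweak above only restates the existing rule in the new node-type names: a switch exposes a single management port numbered 0, while a CA (or RNIC) numbers its ports 1..phys_port_cnt. A hypothetical illustration of how a per-port walk uses the pair (update_port_cache() is a placeholder, not a function in this file):

    int p;

    for (p = start_port(device); p <= end_port(device); ++p)
            update_port_cache(device, p);   /* one GID/PKEY cache per port */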
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 0de335b7bfc2..f35fcc4c0638 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. | 2 | * Copyright (c) 2004-2006 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. | 3 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. |
4 | * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. | 4 | * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. |
5 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | 5 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/idr.h> | 41 | #include <linux/idr.h> |
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/pci.h> | 43 | #include <linux/pci.h> |
44 | #include <linux/random.h> | ||
44 | #include <linux/rbtree.h> | 45 | #include <linux/rbtree.h> |
45 | #include <linux/spinlock.h> | 46 | #include <linux/spinlock.h> |
46 | #include <linux/workqueue.h> | 47 | #include <linux/workqueue.h> |
@@ -73,6 +74,7 @@ static struct ib_cm { | |||
73 | struct rb_root remote_id_table; | 74 | struct rb_root remote_id_table; |
74 | struct rb_root remote_sidr_table; | 75 | struct rb_root remote_sidr_table; |
75 | struct idr local_id_table; | 76 | struct idr local_id_table; |
77 | __be32 random_id_operand; | ||
76 | struct workqueue_struct *wq; | 78 | struct workqueue_struct *wq; |
77 | } cm; | 79 | } cm; |
78 | 80 | ||
@@ -177,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
177 | if (IS_ERR(ah)) | 179 | if (IS_ERR(ah)) |
178 | return PTR_ERR(ah); | 180 | return PTR_ERR(ah); |
179 | 181 | ||
180 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, | 182 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, |
181 | cm_id_priv->av.pkey_index, | 183 | cm_id_priv->av.pkey_index, |
182 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, | 184 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, |
183 | GFP_ATOMIC); | 185 | GFP_ATOMIC); |
@@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) | |||
299 | static int cm_alloc_id(struct cm_id_private *cm_id_priv) | 301 | static int cm_alloc_id(struct cm_id_private *cm_id_priv) |
300 | { | 302 | { |
301 | unsigned long flags; | 303 | unsigned long flags; |
302 | int ret; | 304 | int ret, id; |
303 | static int next_id; | 305 | static int next_id; |
304 | 306 | ||
305 | do { | 307 | do { |
306 | spin_lock_irqsave(&cm.lock, flags); | 308 | spin_lock_irqsave(&cm.lock, flags); |
307 | ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++, | 309 | ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, |
308 | (__force int *) &cm_id_priv->id.local_id); | 310 | next_id++, &id); |
309 | spin_unlock_irqrestore(&cm.lock, flags); | 311 | spin_unlock_irqrestore(&cm.lock, flags); |
310 | } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); | 312 | } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); |
313 | |||
314 | cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand); | ||
311 | return ret; | 315 | return ret; |
312 | } | 316 | } |
313 | 317 | ||
@@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id) | |||
316 | unsigned long flags; | 320 | unsigned long flags; |
317 | 321 | ||
318 | spin_lock_irqsave(&cm.lock, flags); | 322 | spin_lock_irqsave(&cm.lock, flags); |
319 | idr_remove(&cm.local_id_table, (__force int) local_id); | 323 | idr_remove(&cm.local_id_table, |
324 | (__force int) (local_id ^ cm.random_id_operand)); | ||
320 | spin_unlock_irqrestore(&cm.lock, flags); | 325 | spin_unlock_irqrestore(&cm.lock, flags); |
321 | } | 326 | } |
322 | 327 | ||
@@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) | |||
324 | { | 329 | { |
325 | struct cm_id_private *cm_id_priv; | 330 | struct cm_id_private *cm_id_priv; |
326 | 331 | ||
327 | cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id); | 332 | cm_id_priv = idr_find(&cm.local_id_table, |
333 | (__force int) (local_id ^ cm.random_id_operand)); | ||
328 | if (cm_id_priv) { | 334 | if (cm_id_priv) { |
329 | if (cm_id_priv->id.remote_id == remote_id) | 335 | if (cm_id_priv->id.remote_id == remote_id) |
330 | atomic_inc(&cm_id_priv->refcount); | 336 | atomic_inc(&cm_id_priv->refcount); |
@@ -679,6 +685,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) | |||
679 | { | 685 | { |
680 | int wait_time; | 686 | int wait_time; |
681 | 687 | ||
688 | cm_cleanup_timewait(cm_id_priv->timewait_info); | ||
689 | |||
682 | /* | 690 | /* |
683 | * The cm_id could be destroyed by the user before we exit timewait. | 691 | * The cm_id could be destroyed by the user before we exit timewait. |
684 | * To protect against this, we search for the cm_id after exiting | 692 | * To protect against this, we search for the cm_id after exiting |
@@ -1354,7 +1362,7 @@ static int cm_req_handler(struct cm_work *work) | |||
1354 | id.local_id); | 1362 | id.local_id); |
1355 | if (IS_ERR(cm_id_priv->timewait_info)) { | 1363 | if (IS_ERR(cm_id_priv->timewait_info)) { |
1356 | ret = PTR_ERR(cm_id_priv->timewait_info); | 1364 | ret = PTR_ERR(cm_id_priv->timewait_info); |
1357 | goto error1; | 1365 | goto destroy; |
1358 | } | 1366 | } |
1359 | cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; | 1367 | cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; |
1360 | cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; | 1368 | cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; |
@@ -1363,7 +1371,8 @@ static int cm_req_handler(struct cm_work *work) | |||
1363 | listen_cm_id_priv = cm_match_req(work, cm_id_priv); | 1371 | listen_cm_id_priv = cm_match_req(work, cm_id_priv); |
1364 | if (!listen_cm_id_priv) { | 1372 | if (!listen_cm_id_priv) { |
1365 | ret = -EINVAL; | 1373 | ret = -EINVAL; |
1366 | goto error2; | 1374 | kfree(cm_id_priv->timewait_info); |
1375 | goto destroy; | ||
1367 | } | 1376 | } |
1368 | 1377 | ||
1369 | cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; | 1378 | cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; |
@@ -1373,12 +1382,22 @@ static int cm_req_handler(struct cm_work *work) | |||
1373 | 1382 | ||
1374 | cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); | 1383 | cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); |
1375 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); | 1384 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); |
1376 | if (ret) | 1385 | if (ret) { |
1377 | goto error3; | 1386 | ib_get_cached_gid(work->port->cm_dev->device, |
1387 | work->port->port_num, 0, &work->path[0].sgid); | ||
1388 | ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, | ||
1389 | &work->path[0].sgid, sizeof work->path[0].sgid, | ||
1390 | NULL, 0); | ||
1391 | goto rejected; | ||
1392 | } | ||
1378 | if (req_msg->alt_local_lid) { | 1393 | if (req_msg->alt_local_lid) { |
1379 | ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); | 1394 | ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); |
1380 | if (ret) | 1395 | if (ret) { |
1381 | goto error3; | 1396 | ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, |
1397 | &work->path[0].sgid, | ||
1398 | sizeof work->path[0].sgid, NULL, 0); | ||
1399 | goto rejected; | ||
1400 | } | ||
1382 | } | 1401 | } |
1383 | cm_id_priv->tid = req_msg->hdr.tid; | 1402 | cm_id_priv->tid = req_msg->hdr.tid; |
1384 | cm_id_priv->timeout_ms = cm_convert_to_ms( | 1403 | cm_id_priv->timeout_ms = cm_convert_to_ms( |
@@ -1400,12 +1419,11 @@ static int cm_req_handler(struct cm_work *work) | |||
1400 | cm_deref_id(listen_cm_id_priv); | 1419 | cm_deref_id(listen_cm_id_priv); |
1401 | return 0; | 1420 | return 0; |
1402 | 1421 | ||
1403 | error3: atomic_dec(&cm_id_priv->refcount); | 1422 | rejected: |
1423 | atomic_dec(&cm_id_priv->refcount); | ||
1404 | cm_deref_id(listen_cm_id_priv); | 1424 | cm_deref_id(listen_cm_id_priv); |
1405 | cm_cleanup_timewait(cm_id_priv->timewait_info); | 1425 | destroy: |
1406 | error2: kfree(cm_id_priv->timewait_info); | 1426 | ib_destroy_cm_id(cm_id); |
1407 | cm_id_priv->timewait_info = NULL; | ||
1408 | error1: ib_destroy_cm_id(&cm_id_priv->id); | ||
1409 | return ret; | 1427 | return ret; |
1410 | } | 1428 | } |
1411 | 1429 | ||
@@ -2072,8 +2090,9 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) | |||
2072 | spin_unlock_irqrestore(&cm.lock, flags); | 2090 | spin_unlock_irqrestore(&cm.lock, flags); |
2073 | return NULL; | 2091 | return NULL; |
2074 | } | 2092 | } |
2075 | cm_id_priv = idr_find(&cm.local_id_table, | 2093 | cm_id_priv = idr_find(&cm.local_id_table, (__force int) |
2076 | (__force int) timewait_info->work.local_id); | 2094 | (timewait_info->work.local_id ^ |
2095 | cm.random_id_operand)); | ||
2077 | if (cm_id_priv) { | 2096 | if (cm_id_priv) { |
2078 | if (cm_id_priv->id.remote_id == remote_id) | 2097 | if (cm_id_priv->id.remote_id == remote_id) |
2079 | atomic_inc(&cm_id_priv->refcount); | 2098 | atomic_inc(&cm_id_priv->refcount); |
@@ -3125,7 +3144,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, | |||
3125 | qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | | 3144 | qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | |
3126 | IB_ACCESS_REMOTE_WRITE; | 3145 | IB_ACCESS_REMOTE_WRITE; |
3127 | if (cm_id_priv->responder_resources) | 3146 | if (cm_id_priv->responder_resources) |
3128 | qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ; | 3147 | qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | |
3148 | IB_ACCESS_REMOTE_ATOMIC; | ||
3129 | qp_attr->pkey_index = cm_id_priv->av.pkey_index; | 3149 | qp_attr->pkey_index = cm_id_priv->av.pkey_index; |
3130 | qp_attr->port_num = cm_id_priv->av.port->port_num; | 3150 | qp_attr->port_num = cm_id_priv->av.port->port_num; |
3131 | ret = 0; | 3151 | ret = 0; |
@@ -3262,6 +3282,9 @@ static void cm_add_one(struct ib_device *device) | |||
3262 | int ret; | 3282 | int ret; |
3263 | u8 i; | 3283 | u8 i; |
3264 | 3284 | ||
3285 | if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) | ||
3286 | return; | ||
3287 | |||
3265 | cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * | 3288 | cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * |
3266 | device->phys_port_cnt, GFP_KERNEL); | 3289 | device->phys_port_cnt, GFP_KERNEL); |
3267 | if (!cm_dev) | 3290 | if (!cm_dev) |
@@ -3349,6 +3372,7 @@ static int __init ib_cm_init(void) | |||
3349 | cm.remote_qp_table = RB_ROOT; | 3372 | cm.remote_qp_table = RB_ROOT; |
3350 | cm.remote_sidr_table = RB_ROOT; | 3373 | cm.remote_sidr_table = RB_ROOT; |
3351 | idr_init(&cm.local_id_table); | 3374 | idr_init(&cm.local_id_table); |
3375 | get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); | ||
3352 | idr_pre_get(&cm.local_id_table, GFP_KERNEL); | 3376 | idr_pre_get(&cm.local_id_table, GFP_KERNEL); |
3353 | 3377 | ||
3354 | cm.wq = create_workqueue("ib_cm"); | 3378 | cm.wq = create_workqueue("ib_cm"); |
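One behavioural change above deserves a note: local communication IDs are no longer the raw idr indices. A random operand is drawn at module init, allocation XORs it into the wire-visible ID, and every lookup XORs it back out before consulting the idr, so the IDs a remote peer sees are hard to predict. Condensed from the hunks above:

    /* allocation: idr index -> obfuscated wire ID */
    idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++, &id);
    cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);

    /* lookup: undo the XOR to recover the idr index */
    cm_id_priv = idr_find(&cm.local_id_table,
                          (__force int) (local_id ^ cm.random_id_operand));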
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5d625a81193f..1178bd434d1b 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
36 | #include <linux/random.h> | 36 | #include <linux/random.h> |
37 | #include <linux/idr.h> | 37 | #include <linux/idr.h> |
38 | #include <linux/inetdevice.h> | ||
38 | 39 | ||
39 | #include <net/tcp.h> | 40 | #include <net/tcp.h> |
40 | 41 | ||
@@ -43,6 +44,7 @@ | |||
43 | #include <rdma/ib_cache.h> | 44 | #include <rdma/ib_cache.h> |
44 | #include <rdma/ib_cm.h> | 45 | #include <rdma/ib_cm.h> |
45 | #include <rdma/ib_sa.h> | 46 | #include <rdma/ib_sa.h> |
47 | #include <rdma/iw_cm.h> | ||
46 | 48 | ||
47 | MODULE_AUTHOR("Sean Hefty"); | 49 | MODULE_AUTHOR("Sean Hefty"); |
48 | MODULE_DESCRIPTION("Generic RDMA CM Agent"); | 50 | MODULE_DESCRIPTION("Generic RDMA CM Agent"); |
@@ -60,6 +62,7 @@ static struct ib_client cma_client = { | |||
60 | .remove = cma_remove_one | 62 | .remove = cma_remove_one |
61 | }; | 63 | }; |
62 | 64 | ||
65 | static struct ib_sa_client sa_client; | ||
63 | static LIST_HEAD(dev_list); | 66 | static LIST_HEAD(dev_list); |
64 | static LIST_HEAD(listen_any_list); | 67 | static LIST_HEAD(listen_any_list); |
65 | static DEFINE_MUTEX(lock); | 68 | static DEFINE_MUTEX(lock); |
@@ -124,6 +127,7 @@ struct rdma_id_private { | |||
124 | int query_id; | 127 | int query_id; |
125 | union { | 128 | union { |
126 | struct ib_cm_id *ib; | 129 | struct ib_cm_id *ib; |
130 | struct iw_cm_id *iw; | ||
127 | } cm_id; | 131 | } cm_id; |
128 | 132 | ||
129 | u32 seq_num; | 133 | u32 seq_num; |
@@ -259,15 +263,24 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv) | |||
259 | id_priv->cma_dev = NULL; | 263 | id_priv->cma_dev = NULL; |
260 | } | 264 | } |
261 | 265 | ||
262 | static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) | 266 | static int cma_acquire_dev(struct rdma_id_private *id_priv) |
263 | { | 267 | { |
268 | enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type; | ||
264 | struct cma_device *cma_dev; | 269 | struct cma_device *cma_dev; |
265 | union ib_gid gid; | 270 | union ib_gid gid; |
266 | int ret = -ENODEV; | 271 | int ret = -ENODEV; |
267 | 272 | ||
268 | ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid), | 273 | switch (rdma_node_get_transport(dev_type)) { |
274 | case RDMA_TRANSPORT_IB: | ||
275 | ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); | ||
276 | break; | ||
277 | case RDMA_TRANSPORT_IWARP: | ||
278 | iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); | ||
279 | break; | ||
280 | default: | ||
281 | return -ENODEV; | ||
282 | } | ||
269 | 283 | ||
270 | mutex_lock(&lock); | ||
271 | list_for_each_entry(cma_dev, &dev_list, list) { | 284 | list_for_each_entry(cma_dev, &dev_list, list) { |
272 | ret = ib_find_cached_gid(cma_dev->device, &gid, | 285 | ret = ib_find_cached_gid(cma_dev->device, &gid, |
273 | &id_priv->id.port_num, NULL); | 286 | &id_priv->id.port_num, NULL); |
@@ -276,20 +289,9 @@ static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) | |||
276 | break; | 289 | break; |
277 | } | 290 | } |
278 | } | 291 | } |
279 | mutex_unlock(&lock); | ||
280 | return ret; | 292 | return ret; |
281 | } | 293 | } |
282 | 294 | ||
283 | static int cma_acquire_dev(struct rdma_id_private *id_priv) | ||
284 | { | ||
285 | switch (id_priv->id.route.addr.dev_addr.dev_type) { | ||
286 | case IB_NODE_CA: | ||
287 | return cma_acquire_ib_dev(id_priv); | ||
288 | default: | ||
289 | return -ENODEV; | ||
290 | } | ||
291 | } | ||
292 | |||
293 | static void cma_deref_id(struct rdma_id_private *id_priv) | 295 | static void cma_deref_id(struct rdma_id_private *id_priv) |
294 | { | 296 | { |
295 | if (atomic_dec_and_test(&id_priv->refcount)) | 297 | if (atomic_dec_and_test(&id_priv->refcount)) |
@@ -347,6 +349,16 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) | |||
347 | IB_QP_PKEY_INDEX | IB_QP_PORT); | 349 | IB_QP_PKEY_INDEX | IB_QP_PORT); |
348 | } | 350 | } |
349 | 351 | ||
352 | static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) | ||
353 | { | ||
354 | struct ib_qp_attr qp_attr; | ||
355 | |||
356 | qp_attr.qp_state = IB_QPS_INIT; | ||
357 | qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; | ||
358 | |||
359 | return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS); | ||
360 | } | ||
361 | |||
350 | int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, | 362 | int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, |
351 | struct ib_qp_init_attr *qp_init_attr) | 363 | struct ib_qp_init_attr *qp_init_attr) |
352 | { | 364 | { |
@@ -362,10 +374,13 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, | |||
362 | if (IS_ERR(qp)) | 374 | if (IS_ERR(qp)) |
363 | return PTR_ERR(qp); | 375 | return PTR_ERR(qp); |
364 | 376 | ||
365 | switch (id->device->node_type) { | 377 | switch (rdma_node_get_transport(id->device->node_type)) { |
366 | case IB_NODE_CA: | 378 | case RDMA_TRANSPORT_IB: |
367 | ret = cma_init_ib_qp(id_priv, qp); | 379 | ret = cma_init_ib_qp(id_priv, qp); |
368 | break; | 380 | break; |
381 | case RDMA_TRANSPORT_IWARP: | ||
382 | ret = cma_init_iw_qp(id_priv, qp); | ||
383 | break; | ||
369 | default: | 384 | default: |
370 | ret = -ENOSYS; | 385 | ret = -ENOSYS; |
371 | break; | 386 | break; |
@@ -451,13 +466,17 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, | |||
451 | int ret; | 466 | int ret; |
452 | 467 | ||
453 | id_priv = container_of(id, struct rdma_id_private, id); | 468 | id_priv = container_of(id, struct rdma_id_private, id); |
454 | switch (id_priv->id.device->node_type) { | 469 | switch (rdma_node_get_transport(id_priv->id.device->node_type)) { |
455 | case IB_NODE_CA: | 470 | case RDMA_TRANSPORT_IB: |
456 | ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, | 471 | ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, |
457 | qp_attr_mask); | 472 | qp_attr_mask); |
458 | if (qp_attr->qp_state == IB_QPS_RTR) | 473 | if (qp_attr->qp_state == IB_QPS_RTR) |
459 | qp_attr->rq_psn = id_priv->seq_num; | 474 | qp_attr->rq_psn = id_priv->seq_num; |
460 | break; | 475 | break; |
476 | case RDMA_TRANSPORT_IWARP: | ||
477 | ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, | ||
478 | qp_attr_mask); | ||
479 | break; | ||
461 | default: | 480 | default: |
462 | ret = -ENOSYS; | 481 | ret = -ENOSYS; |
463 | break; | 482 | break; |
@@ -590,8 +609,8 @@ static int cma_notify_user(struct rdma_id_private *id_priv, | |||
590 | 609 | ||
591 | static void cma_cancel_route(struct rdma_id_private *id_priv) | 610 | static void cma_cancel_route(struct rdma_id_private *id_priv) |
592 | { | 611 | { |
593 | switch (id_priv->id.device->node_type) { | 612 | switch (rdma_node_get_transport(id_priv->id.device->node_type)) { |
594 | case IB_NODE_CA: | 613 | case RDMA_TRANSPORT_IB: |
595 | if (id_priv->query) | 614 | if (id_priv->query) |
596 | ib_sa_cancel_query(id_priv->query_id, id_priv->query); | 615 | ib_sa_cancel_query(id_priv->query_id, id_priv->query); |
597 | break; | 616 | break; |
@@ -611,11 +630,15 @@ static void cma_destroy_listen(struct rdma_id_private *id_priv) | |||
611 | cma_exch(id_priv, CMA_DESTROYING); | 630 | cma_exch(id_priv, CMA_DESTROYING); |
612 | 631 | ||
613 | if (id_priv->cma_dev) { | 632 | if (id_priv->cma_dev) { |
614 | switch (id_priv->id.device->node_type) { | 633 | switch (rdma_node_get_transport(id_priv->id.device->node_type)) { |
615 | case IB_NODE_CA: | 634 | case RDMA_TRANSPORT_IB: |
616 | if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) | 635 | if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) |
617 | ib_destroy_cm_id(id_priv->cm_id.ib); | 636 | ib_destroy_cm_id(id_priv->cm_id.ib); |
618 | break; | 637 | break; |
638 | case RDMA_TRANSPORT_IWARP: | ||
639 | if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) | ||
640 | iw_destroy_cm_id(id_priv->cm_id.iw); | ||
641 | break; | ||
619 | default: | 642 | default: |
620 | break; | 643 | break; |
621 | } | 644 | } |
@@ -689,19 +712,25 @@ void rdma_destroy_id(struct rdma_cm_id *id) | |||
689 | state = cma_exch(id_priv, CMA_DESTROYING); | 712 | state = cma_exch(id_priv, CMA_DESTROYING); |
690 | cma_cancel_operation(id_priv, state); | 713 | cma_cancel_operation(id_priv, state); |
691 | 714 | ||
715 | mutex_lock(&lock); | ||
692 | if (id_priv->cma_dev) { | 716 | if (id_priv->cma_dev) { |
693 | switch (id->device->node_type) { | 717 | mutex_unlock(&lock); |
694 | case IB_NODE_CA: | 718 | switch (rdma_node_get_transport(id->device->node_type)) { |
695 | if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) | 719 | case RDMA_TRANSPORT_IB: |
720 | if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) | ||
696 | ib_destroy_cm_id(id_priv->cm_id.ib); | 721 | ib_destroy_cm_id(id_priv->cm_id.ib); |
697 | break; | 722 | break; |
723 | case RDMA_TRANSPORT_IWARP: | ||
724 | if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) | ||
725 | iw_destroy_cm_id(id_priv->cm_id.iw); | ||
726 | break; | ||
698 | default: | 727 | default: |
699 | break; | 728 | break; |
700 | } | 729 | } |
701 | mutex_lock(&lock); | 730 | mutex_lock(&lock); |
702 | cma_detach_from_dev(id_priv); | 731 | cma_detach_from_dev(id_priv); |
703 | mutex_unlock(&lock); | ||
704 | } | 732 | } |
733 | mutex_unlock(&lock); | ||
705 | 734 | ||
706 | cma_release_port(id_priv); | 735 | cma_release_port(id_priv); |
707 | cma_deref_id(id_priv); | 736 | cma_deref_id(id_priv); |
@@ -869,7 +898,7 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id, | |||
869 | ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); | 898 | ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); |
870 | ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); | 899 | ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); |
871 | ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); | 900 | ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); |
872 | rt->addr.dev_addr.dev_type = IB_NODE_CA; | 901 | rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA; |
873 | 902 | ||
874 | id_priv = container_of(id, struct rdma_id_private, id); | 903 | id_priv = container_of(id, struct rdma_id_private, id); |
875 | id_priv->state = CMA_CONNECT; | 904 | id_priv->state = CMA_CONNECT; |
@@ -898,7 +927,9 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
898 | } | 927 | } |
899 | 928 | ||
900 | atomic_inc(&conn_id->dev_remove); | 929 | atomic_inc(&conn_id->dev_remove); |
901 | ret = cma_acquire_ib_dev(conn_id); | 930 | mutex_lock(&lock); |
931 | ret = cma_acquire_dev(conn_id); | ||
932 | mutex_unlock(&lock); | ||
902 | if (ret) { | 933 | if (ret) { |
903 | ret = -ENODEV; | 934 | ret = -ENODEV; |
904 | cma_release_remove(conn_id); | 935 | cma_release_remove(conn_id); |
@@ -982,6 +1013,130 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, | |||
982 | } | 1013 | } |
983 | } | 1014 | } |
984 | 1015 | ||
1016 | static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | ||
1017 | { | ||
1018 | struct rdma_id_private *id_priv = iw_id->context; | ||
1019 | enum rdma_cm_event_type event = 0; | ||
1020 | struct sockaddr_in *sin; | ||
1021 | int ret = 0; | ||
1022 | |||
1023 | atomic_inc(&id_priv->dev_remove); | ||
1024 | |||
1025 | switch (iw_event->event) { | ||
1026 | case IW_CM_EVENT_CLOSE: | ||
1027 | event = RDMA_CM_EVENT_DISCONNECTED; | ||
1028 | break; | ||
1029 | case IW_CM_EVENT_CONNECT_REPLY: | ||
1030 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; | ||
1031 | *sin = iw_event->local_addr; | ||
1032 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; | ||
1033 | *sin = iw_event->remote_addr; | ||
1034 | if (iw_event->status) | ||
1035 | event = RDMA_CM_EVENT_REJECTED; | ||
1036 | else | ||
1037 | event = RDMA_CM_EVENT_ESTABLISHED; | ||
1038 | break; | ||
1039 | case IW_CM_EVENT_ESTABLISHED: | ||
1040 | event = RDMA_CM_EVENT_ESTABLISHED; | ||
1041 | break; | ||
1042 | default: | ||
1043 | BUG_ON(1); | ||
1044 | } | ||
1045 | |||
1046 | ret = cma_notify_user(id_priv, event, iw_event->status, | ||
1047 | iw_event->private_data, | ||
1048 | iw_event->private_data_len); | ||
1049 | if (ret) { | ||
1050 | /* Destroy the CM ID by returning a non-zero value. */ | ||
1051 | id_priv->cm_id.iw = NULL; | ||
1052 | cma_exch(id_priv, CMA_DESTROYING); | ||
1053 | cma_release_remove(id_priv); | ||
1054 | rdma_destroy_id(&id_priv->id); | ||
1055 | return ret; | ||
1056 | } | ||
1057 | |||
1058 | cma_release_remove(id_priv); | ||
1059 | return ret; | ||
1060 | } | ||
1061 | |||
1062 | static int iw_conn_req_handler(struct iw_cm_id *cm_id, | ||
1063 | struct iw_cm_event *iw_event) | ||
1064 | { | ||
1065 | struct rdma_cm_id *new_cm_id; | ||
1066 | struct rdma_id_private *listen_id, *conn_id; | ||
1067 | struct sockaddr_in *sin; | ||
1068 | struct net_device *dev = NULL; | ||
1069 | int ret; | ||
1070 | |||
1071 | listen_id = cm_id->context; | ||
1072 | atomic_inc(&listen_id->dev_remove); | ||
1073 | if (!cma_comp(listen_id, CMA_LISTEN)) { | ||
1074 | ret = -ECONNABORTED; | ||
1075 | goto out; | ||
1076 | } | ||
1077 | |||
1078 | /* Create a new RDMA id for the new IW CM ID */ | ||
1079 | new_cm_id = rdma_create_id(listen_id->id.event_handler, | ||
1080 | listen_id->id.context, | ||
1081 | RDMA_PS_TCP); | ||
1082 | if (!new_cm_id) { | ||
1083 | ret = -ENOMEM; | ||
1084 | goto out; | ||
1085 | } | ||
1086 | conn_id = container_of(new_cm_id, struct rdma_id_private, id); | ||
1087 | atomic_inc(&conn_id->dev_remove); | ||
1088 | conn_id->state = CMA_CONNECT; | ||
1089 | |||
1090 | dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr); | ||
1091 | if (!dev) { | ||
1092 | ret = -EADDRNOTAVAIL; | ||
1093 | cma_release_remove(conn_id); | ||
1094 | rdma_destroy_id(new_cm_id); | ||
1095 | goto out; | ||
1096 | } | ||
1097 | ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); | ||
1098 | if (ret) { | ||
1099 | cma_release_remove(conn_id); | ||
1100 | rdma_destroy_id(new_cm_id); | ||
1101 | goto out; | ||
1102 | } | ||
1103 | |||
1104 | mutex_lock(&lock); | ||
1105 | ret = cma_acquire_dev(conn_id); | ||
1106 | mutex_unlock(&lock); | ||
1107 | if (ret) { | ||
1108 | cma_release_remove(conn_id); | ||
1109 | rdma_destroy_id(new_cm_id); | ||
1110 | goto out; | ||
1111 | } | ||
1112 | |||
1113 | conn_id->cm_id.iw = cm_id; | ||
1114 | cm_id->context = conn_id; | ||
1115 | cm_id->cm_handler = cma_iw_handler; | ||
1116 | |||
1117 | sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr; | ||
1118 | *sin = iw_event->local_addr; | ||
1119 | sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; | ||
1120 | *sin = iw_event->remote_addr; | ||
1121 | |||
1122 | ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0, | ||
1123 | iw_event->private_data, | ||
1124 | iw_event->private_data_len); | ||
1125 | if (ret) { | ||
1126 | /* User wants to destroy the CM ID */ | ||
1127 | conn_id->cm_id.iw = NULL; | ||
1128 | cma_exch(conn_id, CMA_DESTROYING); | ||
1129 | cma_release_remove(conn_id); | ||
1130 | rdma_destroy_id(&conn_id->id); | ||
1131 | } | ||
1132 | |||
1133 | out: | ||
1134 | if (dev) | ||
1135 | dev_put(dev); | ||
1136 | cma_release_remove(listen_id); | ||
1137 | return ret; | ||
1138 | } | ||
1139 | |||
985 | static int cma_ib_listen(struct rdma_id_private *id_priv) | 1140 | static int cma_ib_listen(struct rdma_id_private *id_priv) |
986 | { | 1141 | { |
987 | struct ib_cm_compare_data compare_data; | 1142 | struct ib_cm_compare_data compare_data; |
@@ -1011,6 +1166,30 @@ static int cma_ib_listen(struct rdma_id_private *id_priv) | |||
1011 | return ret; | 1166 | return ret; |
1012 | } | 1167 | } |
1013 | 1168 | ||
1169 | static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) | ||
1170 | { | ||
1171 | int ret; | ||
1172 | struct sockaddr_in *sin; | ||
1173 | |||
1174 | id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device, | ||
1175 | iw_conn_req_handler, | ||
1176 | id_priv); | ||
1177 | if (IS_ERR(id_priv->cm_id.iw)) | ||
1178 | return PTR_ERR(id_priv->cm_id.iw); | ||
1179 | |||
1180 | sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; | ||
1181 | id_priv->cm_id.iw->local_addr = *sin; | ||
1182 | |||
1183 | ret = iw_cm_listen(id_priv->cm_id.iw, backlog); | ||
1184 | |||
1185 | if (ret) { | ||
1186 | iw_destroy_cm_id(id_priv->cm_id.iw); | ||
1187 | id_priv->cm_id.iw = NULL; | ||
1188 | } | ||
1189 | |||
1190 | return ret; | ||
1191 | } | ||
1192 | |||
1014 | static int cma_listen_handler(struct rdma_cm_id *id, | 1193 | static int cma_listen_handler(struct rdma_cm_id *id, |
1015 | struct rdma_cm_event *event) | 1194 | struct rdma_cm_event *event) |
1016 | { | 1195 | { |
@@ -1087,12 +1266,17 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) | |||
1087 | 1266 | ||
1088 | id_priv->backlog = backlog; | 1267 | id_priv->backlog = backlog; |
1089 | if (id->device) { | 1268 | if (id->device) { |
1090 | switch (id->device->node_type) { | 1269 | switch (rdma_node_get_transport(id->device->node_type)) { |
1091 | case IB_NODE_CA: | 1270 | case RDMA_TRANSPORT_IB: |
1092 | ret = cma_ib_listen(id_priv); | 1271 | ret = cma_ib_listen(id_priv); |
1093 | if (ret) | 1272 | if (ret) |
1094 | goto err; | 1273 | goto err; |
1095 | break; | 1274 | break; |
1275 | case RDMA_TRANSPORT_IWARP: | ||
1276 | ret = cma_iw_listen(id_priv, backlog); | ||
1277 | if (ret) | ||
1278 | goto err; | ||
1279 | break; | ||
1096 | default: | 1280 | default: |
1097 | ret = -ENOSYS; | 1281 | ret = -ENOSYS; |
1098 | goto err; | 1282 | goto err; |
@@ -1140,7 +1324,7 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, | |||
1140 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); | 1324 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); |
1141 | path_rec.numb_path = 1; | 1325 | path_rec.numb_path = 1; |
1142 | 1326 | ||
1143 | id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device, | 1327 | id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, |
1144 | id_priv->id.port_num, &path_rec, | 1328 | id_priv->id.port_num, &path_rec, |
1145 | IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | | 1329 | IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | |
1146 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH, | 1330 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH, |
@@ -1231,6 +1415,23 @@ err: | |||
1231 | } | 1415 | } |
1232 | EXPORT_SYMBOL(rdma_set_ib_paths); | 1416 | EXPORT_SYMBOL(rdma_set_ib_paths); |
1233 | 1417 | ||
1418 | static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) | ||
1419 | { | ||
1420 | struct cma_work *work; | ||
1421 | |||
1422 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
1423 | if (!work) | ||
1424 | return -ENOMEM; | ||
1425 | |||
1426 | work->id = id_priv; | ||
1427 | INIT_WORK(&work->work, cma_work_handler, work); | ||
1428 | work->old_state = CMA_ROUTE_QUERY; | ||
1429 | work->new_state = CMA_ROUTE_RESOLVED; | ||
1430 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | ||
1431 | queue_work(cma_wq, &work->work); | ||
1432 | return 0; | ||
1433 | } | ||
1434 | |||
1234 | int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) | 1435 | int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) |
1235 | { | 1436 | { |
1236 | struct rdma_id_private *id_priv; | 1437 | struct rdma_id_private *id_priv; |
@@ -1241,10 +1442,13 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) | |||
1241 | return -EINVAL; | 1442 | return -EINVAL; |
1242 | 1443 | ||
1243 | atomic_inc(&id_priv->refcount); | 1444 | atomic_inc(&id_priv->refcount); |
1244 | switch (id->device->node_type) { | 1445 | switch (rdma_node_get_transport(id->device->node_type)) { |
1245 | case IB_NODE_CA: | 1446 | case RDMA_TRANSPORT_IB: |
1246 | ret = cma_resolve_ib_route(id_priv, timeout_ms); | 1447 | ret = cma_resolve_ib_route(id_priv, timeout_ms); |
1247 | break; | 1448 | break; |
1449 | case RDMA_TRANSPORT_IWARP: | ||
1450 | ret = cma_resolve_iw_route(id_priv, timeout_ms); | ||
1451 | break; | ||
1248 | default: | 1452 | default: |
1249 | ret = -ENOSYS; | 1453 | ret = -ENOSYS; |
1250 | break; | 1454 | break; |
@@ -1309,16 +1513,26 @@ static void addr_handler(int status, struct sockaddr *src_addr, | |||
1309 | enum rdma_cm_event_type event; | 1513 | enum rdma_cm_event_type event; |
1310 | 1514 | ||
1311 | atomic_inc(&id_priv->dev_remove); | 1515 | atomic_inc(&id_priv->dev_remove); |
1312 | if (!id_priv->cma_dev && !status) | 1516 | |
1517 | /* | ||
1518 | * Grab mutex to block rdma_destroy_id() from removing the device while | ||
1519 | * we're trying to acquire it. | ||
1520 | */ | ||
1521 | mutex_lock(&lock); | ||
1522 | if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) { | ||
1523 | mutex_unlock(&lock); | ||
1524 | goto out; | ||
1525 | } | ||
1526 | |||
1527 | if (!status && !id_priv->cma_dev) | ||
1313 | status = cma_acquire_dev(id_priv); | 1528 | status = cma_acquire_dev(id_priv); |
1529 | mutex_unlock(&lock); | ||
1314 | 1530 | ||
1315 | if (status) { | 1531 | if (status) { |
1316 | if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND)) | 1532 | if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) |
1317 | goto out; | 1533 | goto out; |
1318 | event = RDMA_CM_EVENT_ADDR_ERROR; | 1534 | event = RDMA_CM_EVENT_ADDR_ERROR; |
1319 | } else { | 1535 | } else { |
1320 | if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) | ||
1321 | goto out; | ||
1322 | memcpy(&id_priv->id.route.addr.src_addr, src_addr, | 1536 | memcpy(&id_priv->id.route.addr.src_addr, src_addr, |
1323 | ip_addr_size(src_addr)); | 1537 | ip_addr_size(src_addr)); |
1324 | event = RDMA_CM_EVENT_ADDR_RESOLVED; | 1538 | event = RDMA_CM_EVENT_ADDR_RESOLVED; |
@@ -1492,7 +1706,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) | |||
1492 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { | 1706 | hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { |
1493 | if (cma_any_addr(&cur_id->id.route.addr.src_addr)) | 1707 | if (cma_any_addr(&cur_id->id.route.addr.src_addr)) |
1494 | return -EADDRNOTAVAIL; | 1708 | return -EADDRNOTAVAIL; |
1495 | 1709 | ||
1496 | cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; | 1710 | cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; |
1497 | if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) | 1711 | if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) |
1498 | return -EADDRINUSE; | 1712 | return -EADDRINUSE; |
@@ -1542,8 +1756,11 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
1542 | 1756 | ||
1543 | if (!cma_any_addr(addr)) { | 1757 | if (!cma_any_addr(addr)) { |
1544 | ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); | 1758 | ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); |
1545 | if (!ret) | 1759 | if (!ret) { |
1760 | mutex_lock(&lock); | ||
1546 | ret = cma_acquire_dev(id_priv); | 1761 | ret = cma_acquire_dev(id_priv); |
1762 | mutex_unlock(&lock); | ||
1763 | } | ||
1547 | if (ret) | 1764 | if (ret) |
1548 | goto err; | 1765 | goto err; |
1549 | } | 1766 | } |
@@ -1649,6 +1866,47 @@ out: | |||
1649 | return ret; | 1866 | return ret; |
1650 | } | 1867 | } |
1651 | 1868 | ||
1869 | static int cma_connect_iw(struct rdma_id_private *id_priv, | ||
1870 | struct rdma_conn_param *conn_param) | ||
1871 | { | ||
1872 | struct iw_cm_id *cm_id; | ||
1873 | struct sockaddr_in* sin; | ||
1874 | int ret; | ||
1875 | struct iw_cm_conn_param iw_param; | ||
1876 | |||
1877 | cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); | ||
1878 | if (IS_ERR(cm_id)) { | ||
1879 | ret = PTR_ERR(cm_id); | ||
1880 | goto out; | ||
1881 | } | ||
1882 | |||
1883 | id_priv->cm_id.iw = cm_id; | ||
1884 | |||
1885 | sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; | ||
1886 | cm_id->local_addr = *sin; | ||
1887 | |||
1888 | sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; | ||
1889 | cm_id->remote_addr = *sin; | ||
1890 | |||
1891 | ret = cma_modify_qp_rtr(&id_priv->id); | ||
1892 | if (ret) { | ||
1893 | iw_destroy_cm_id(cm_id); | ||
1894 | return ret; | ||
1895 | } | ||
1896 | |||
1897 | iw_param.ord = conn_param->initiator_depth; | ||
1898 | iw_param.ird = conn_param->responder_resources; | ||
1899 | iw_param.private_data = conn_param->private_data; | ||
1900 | iw_param.private_data_len = conn_param->private_data_len; | ||
1901 | if (id_priv->id.qp) | ||
1902 | iw_param.qpn = id_priv->qp_num; | ||
1903 | else | ||
1904 | iw_param.qpn = conn_param->qp_num; | ||
1905 | ret = iw_cm_connect(cm_id, &iw_param); | ||
1906 | out: | ||
1907 | return ret; | ||
1908 | } | ||
1909 | |||
1652 | int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | 1910 | int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) |
1653 | { | 1911 | { |
1654 | struct rdma_id_private *id_priv; | 1912 | struct rdma_id_private *id_priv; |
@@ -1664,10 +1922,13 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
1664 | id_priv->srq = conn_param->srq; | 1922 | id_priv->srq = conn_param->srq; |
1665 | } | 1923 | } |
1666 | 1924 | ||
1667 | switch (id->device->node_type) { | 1925 | switch (rdma_node_get_transport(id->device->node_type)) { |
1668 | case IB_NODE_CA: | 1926 | case RDMA_TRANSPORT_IB: |
1669 | ret = cma_connect_ib(id_priv, conn_param); | 1927 | ret = cma_connect_ib(id_priv, conn_param); |
1670 | break; | 1928 | break; |
1929 | case RDMA_TRANSPORT_IWARP: | ||
1930 | ret = cma_connect_iw(id_priv, conn_param); | ||
1931 | break; | ||
1671 | default: | 1932 | default: |
1672 | ret = -ENOSYS; | 1933 | ret = -ENOSYS; |
1673 | break; | 1934 | break; |
@@ -1708,6 +1969,28 @@ static int cma_accept_ib(struct rdma_id_private *id_priv, | |||
1708 | return ib_send_cm_rep(id_priv->cm_id.ib, &rep); | 1969 | return ib_send_cm_rep(id_priv->cm_id.ib, &rep); |
1709 | } | 1970 | } |
1710 | 1971 | ||
1972 | static int cma_accept_iw(struct rdma_id_private *id_priv, | ||
1973 | struct rdma_conn_param *conn_param) | ||
1974 | { | ||
1975 | struct iw_cm_conn_param iw_param; | ||
1976 | int ret; | ||
1977 | |||
1978 | ret = cma_modify_qp_rtr(&id_priv->id); | ||
1979 | if (ret) | ||
1980 | return ret; | ||
1981 | |||
1982 | iw_param.ord = conn_param->initiator_depth; | ||
1983 | iw_param.ird = conn_param->responder_resources; | ||
1984 | iw_param.private_data = conn_param->private_data; | ||
1985 | iw_param.private_data_len = conn_param->private_data_len; | ||
1986 | if (id_priv->id.qp) { | ||
1987 | iw_param.qpn = id_priv->qp_num; | ||
1988 | } else | ||
1989 | iw_param.qpn = conn_param->qp_num; | ||
1990 | |||
1991 | return iw_cm_accept(id_priv->cm_id.iw, &iw_param); | ||
1992 | } | ||
1993 | |||
1711 | int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | 1994 | int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) |
1712 | { | 1995 | { |
1713 | struct rdma_id_private *id_priv; | 1996 | struct rdma_id_private *id_priv; |
@@ -1723,13 +2006,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
1723 | id_priv->srq = conn_param->srq; | 2006 | id_priv->srq = conn_param->srq; |
1724 | } | 2007 | } |
1725 | 2008 | ||
1726 | switch (id->device->node_type) { | 2009 | switch (rdma_node_get_transport(id->device->node_type)) { |
1727 | case IB_NODE_CA: | 2010 | case RDMA_TRANSPORT_IB: |
1728 | if (conn_param) | 2011 | if (conn_param) |
1729 | ret = cma_accept_ib(id_priv, conn_param); | 2012 | ret = cma_accept_ib(id_priv, conn_param); |
1730 | else | 2013 | else |
1731 | ret = cma_rep_recv(id_priv); | 2014 | ret = cma_rep_recv(id_priv); |
1732 | break; | 2015 | break; |
2016 | case RDMA_TRANSPORT_IWARP: | ||
2017 | ret = cma_accept_iw(id_priv, conn_param); | ||
2018 | break; | ||
1733 | default: | 2019 | default: |
1734 | ret = -ENOSYS; | 2020 | ret = -ENOSYS; |
1735 | break; | 2021 | break; |
@@ -1756,12 +2042,16 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, | |||
1756 | if (!cma_comp(id_priv, CMA_CONNECT)) | 2042 | if (!cma_comp(id_priv, CMA_CONNECT)) |
1757 | return -EINVAL; | 2043 | return -EINVAL; |
1758 | 2044 | ||
1759 | switch (id->device->node_type) { | 2045 | switch (rdma_node_get_transport(id->device->node_type)) { |
1760 | case IB_NODE_CA: | 2046 | case RDMA_TRANSPORT_IB: |
1761 | ret = ib_send_cm_rej(id_priv->cm_id.ib, | 2047 | ret = ib_send_cm_rej(id_priv->cm_id.ib, |
1762 | IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, | 2048 | IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, |
1763 | private_data, private_data_len); | 2049 | private_data, private_data_len); |
1764 | break; | 2050 | break; |
2051 | case RDMA_TRANSPORT_IWARP: | ||
2052 | ret = iw_cm_reject(id_priv->cm_id.iw, | ||
2053 | private_data, private_data_len); | ||
2054 | break; | ||
1765 | default: | 2055 | default: |
1766 | ret = -ENOSYS; | 2056 | ret = -ENOSYS; |
1767 | break; | 2057 | break; |
@@ -1780,17 +2070,20 @@ int rdma_disconnect(struct rdma_cm_id *id) | |||
1780 | !cma_comp(id_priv, CMA_DISCONNECT)) | 2070 | !cma_comp(id_priv, CMA_DISCONNECT)) |
1781 | return -EINVAL; | 2071 | return -EINVAL; |
1782 | 2072 | ||
1783 | ret = cma_modify_qp_err(id); | 2073 | switch (rdma_node_get_transport(id->device->node_type)) { |
1784 | if (ret) | 2074 | case RDMA_TRANSPORT_IB: |
1785 | goto out; | 2075 | ret = cma_modify_qp_err(id); |
1786 | 2076 | if (ret) | |
1787 | switch (id->device->node_type) { | 2077 | goto out; |
1788 | case IB_NODE_CA: | ||
1789 | /* Initiate or respond to a disconnect. */ | 2078 | /* Initiate or respond to a disconnect. */ |
1790 | if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) | 2079 | if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) |
1791 | ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); | 2080 | ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); |
1792 | break; | 2081 | break; |
2082 | case RDMA_TRANSPORT_IWARP: | ||
2083 | ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); | ||
2084 | break; | ||
1793 | default: | 2085 | default: |
2086 | ret = -EINVAL; | ||
1794 | break; | 2087 | break; |
1795 | } | 2088 | } |
1796 | out: | 2089 | out: |
@@ -1907,12 +2200,15 @@ static int cma_init(void) | |||
1907 | if (!cma_wq) | 2200 | if (!cma_wq) |
1908 | return -ENOMEM; | 2201 | return -ENOMEM; |
1909 | 2202 | ||
2203 | ib_sa_register_client(&sa_client); | ||
2204 | |||
1910 | ret = ib_register_client(&cma_client); | 2205 | ret = ib_register_client(&cma_client); |
1911 | if (ret) | 2206 | if (ret) |
1912 | goto err; | 2207 | goto err; |
1913 | return 0; | 2208 | return 0; |
1914 | 2209 | ||
1915 | err: | 2210 | err: |
2211 | ib_sa_unregister_client(&sa_client); | ||
1916 | destroy_workqueue(cma_wq); | 2212 | destroy_workqueue(cma_wq); |
1917 | return ret; | 2213 | return ret; |
1918 | } | 2214 | } |
@@ -1920,6 +2216,7 @@ err: | |||
1920 | static void cma_cleanup(void) | 2216 | static void cma_cleanup(void) |
1921 | { | 2217 | { |
1922 | ib_unregister_client(&cma_client); | 2218 | ib_unregister_client(&cma_client); |
2219 | ib_sa_unregister_client(&sa_client); | ||
1923 | destroy_workqueue(cma_wq); | 2220 | destroy_workqueue(cma_wq); |
1924 | idr_destroy(&sdp_ps); | 2221 | idr_destroy(&sdp_ps); |
1925 | idr_destroy(&tcp_ps); | 2222 | idr_destroy(&tcp_ps); |
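The net effect of the cma.c changes is that an RDMA CM consumer stays transport-agnostic: the same call sequence now lands on ib_cm for InfiniBand HCAs and on iw_cm for RNICs, with the choice made per device by rdma_node_get_transport(). A condensed caller-side sketch (the handler body, ctx and dst are placeholders; the waits for intermediate events are elided):

    static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
    {
            /* ADDR_RESOLVED, ROUTE_RESOLVED, ESTABLISHED, DISCONNECTED, ... */
            return 0;
    }

    struct rdma_cm_id *id;
    struct rdma_conn_param param = { .responder_resources = 1,
                                     .initiator_depth = 1 };

    id = rdma_create_id(my_cm_handler, ctx, RDMA_PS_TCP);
    rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
    rdma_resolve_route(id, 2000);   /* IB: SA path query; iWARP: immediate work item */
    rdma_connect(id, &param);       /* dispatches to ib_cm or iw_cm */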
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b2f3cb91d9bc..63d2a39fb82c 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -385,7 +385,7 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client) | |||
385 | EXPORT_SYMBOL(ib_get_client_data); | 385 | EXPORT_SYMBOL(ib_get_client_data); |
386 | 386 | ||
387 | /** | 387 | /** |
388 | * ib_set_client_data - Get IB client context | 388 | * ib_set_client_data - Set IB client context |
389 | * @device:Device to set context for | 389 | * @device:Device to set context for |
390 | * @client:Client to set context for | 390 | * @client:Client to set context for |
391 | * @data:Context to set | 391 | * @data:Context to set |
@@ -505,7 +505,7 @@ int ib_query_port(struct ib_device *device, | |||
505 | u8 port_num, | 505 | u8 port_num, |
506 | struct ib_port_attr *port_attr) | 506 | struct ib_port_attr *port_attr) |
507 | { | 507 | { |
508 | if (device->node_type == IB_NODE_SWITCH) { | 508 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
509 | if (port_num) | 509 | if (port_num) |
510 | return -EINVAL; | 510 | return -EINVAL; |
511 | } else if (port_num < 1 || port_num > device->phys_port_cnt) | 511 | } else if (port_num < 1 || port_num > device->phys_port_cnt) |
@@ -580,7 +580,7 @@ int ib_modify_port(struct ib_device *device, | |||
580 | u8 port_num, int port_modify_mask, | 580 | u8 port_num, int port_modify_mask, |
581 | struct ib_port_modify *port_modify) | 581 | struct ib_port_modify *port_modify) |
582 | { | 582 | { |
583 | if (device->node_type == IB_NODE_SWITCH) { | 583 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
584 | if (port_num) | 584 | if (port_num) |
585 | return -EINVAL; | 585 | return -EINVAL; |
586 | } else if (port_num < 1 || port_num > device->phys_port_cnt) | 586 | } else if (port_num < 1 || port_num > device->phys_port_cnt) |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c new file mode 100644 index 000000000000..c3fb304a4e86 --- /dev/null +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -0,0 +1,1019 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. | ||
3 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. | ||
4 | * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. | ||
5 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | ||
6 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
7 | * Copyright (c) 2005 Network Appliance, Inc. All rights reserved. | ||
8 | * | ||
9 | * This software is available to you under a choice of one of two | ||
10 | * licenses. You may choose to be licensed under the terms of the GNU | ||
11 | * General Public License (GPL) Version 2, available from the file | ||
12 | * COPYING in the main directory of this source tree, or the | ||
13 | * OpenIB.org BSD license below: | ||
14 | * | ||
15 | * Redistribution and use in source and binary forms, with or | ||
16 | * without modification, are permitted provided that the following | ||
17 | * conditions are met: | ||
18 | * | ||
19 | * - Redistributions of source code must retain the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer. | ||
22 | * | ||
23 | * - Redistributions in binary form must reproduce the above | ||
24 | * copyright notice, this list of conditions and the following | ||
25 | * disclaimer in the documentation and/or other materials | ||
26 | * provided with the distribution. | ||
27 | * | ||
28 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
29 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
30 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
31 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
32 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
33 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
34 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
35 | * SOFTWARE. | ||
36 | * | ||
37 | */ | ||
38 | #include <linux/dma-mapping.h> | ||
39 | #include <linux/err.h> | ||
40 | #include <linux/idr.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | #include <linux/pci.h> | ||
43 | #include <linux/rbtree.h> | ||
44 | #include <linux/spinlock.h> | ||
45 | #include <linux/workqueue.h> | ||
46 | #include <linux/completion.h> | ||
47 | |||
48 | #include <rdma/iw_cm.h> | ||
49 | #include <rdma/ib_addr.h> | ||
50 | |||
51 | #include "iwcm.h" | ||
52 | |||
53 | MODULE_AUTHOR("Tom Tucker"); | ||
54 | MODULE_DESCRIPTION("iWARP CM"); | ||
55 | MODULE_LICENSE("Dual BSD/GPL"); | ||
56 | |||
57 | static struct workqueue_struct *iwcm_wq; | ||
58 | struct iwcm_work { | ||
59 | struct work_struct work; | ||
60 | struct iwcm_id_private *cm_id; | ||
61 | struct list_head list; | ||
62 | struct iw_cm_event event; | ||
63 | struct list_head free_list; | ||
64 | }; | ||
65 | |||
66 | /* | ||
67 | * The following services provide a mechanism for pre-allocating iwcm_work | ||
68 | * elements. The design pre-allocates them based on the cm_id type: | ||
69 | * LISTENING IDS: Get enough elements preallocated to handle the | ||
70 | * listen backlog. | ||
71 | * ACTIVE IDS: 4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE | ||
72 | * PASSIVE IDS: 3: ESTABLISHED, DISCONNECT, CLOSE | ||
73 | * | ||
74 | * Allocating them in connect and listen avoids having to deal | ||
75 | * with allocation failures on the event upcall from the provider (which | ||
76 | * is called in the interrupt context). | ||
77 | * | ||
78 | * One exception is when creating the cm_id for incoming connection requests. | ||
79 | * There are two cases: | ||
80 | * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If | ||
81 | * the backlog is exceeded, then no more connection request events will | ||
82 | be processed. cm_event_handler() returns -ENOMEM in this case. It's up | ||
83 | to the provider to reject the connection request. | ||
84 | * 2) in the connection request workqueue handler, cm_conn_req_handler(). | ||
85 | * If work elements cannot be allocated for the new connect request cm_id, | ||
86 | * then IWCM will call the provider reject method. This is ok since | ||
87 | * cm_conn_req_handler() runs in the workqueue thread context. | ||
88 | */ | ||
89 | |||
90 | static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv) | ||
91 | { | ||
92 | struct iwcm_work *work; | ||
93 | |||
94 | if (list_empty(&cm_id_priv->work_free_list)) | ||
95 | return NULL; | ||
96 | work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work, | ||
97 | free_list); | ||
98 | list_del_init(&work->free_list); | ||
99 | return work; | ||
100 | } | ||
101 | |||
102 | static void put_work(struct iwcm_work *work) | ||
103 | { | ||
104 | list_add(&work->free_list, &work->cm_id->work_free_list); | ||
105 | } | ||
106 | |||
107 | static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv) | ||
108 | { | ||
109 | struct list_head *e, *tmp; | ||
110 | |||
111 | list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) | ||
112 | kfree(list_entry(e, struct iwcm_work, free_list)); | ||
113 | } | ||
114 | |||
115 | static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count) | ||
116 | { | ||
117 | struct iwcm_work *work; | ||
118 | |||
119 | BUG_ON(!list_empty(&cm_id_priv->work_free_list)); | ||
120 | while (count--) { | ||
121 | work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL); | ||
122 | if (!work) { | ||
123 | dealloc_work_entries(cm_id_priv); | ||
124 | return -ENOMEM; | ||
125 | } | ||
126 | work->cm_id = cm_id_priv; | ||
127 | INIT_LIST_HEAD(&work->list); | ||
128 | put_work(work); | ||
129 | } | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * Save private data from incoming connection requests in the | ||
135 | * cm_id_priv so the low level driver doesn't have to. Adjust | ||
136 | * the event ptr to point to the local copy. | ||
137 | */ | ||
138 | static int copy_private_data(struct iwcm_id_private *cm_id_priv, | ||
139 | struct iw_cm_event *event) | ||
140 | { | ||
141 | void *p; | ||
142 | |||
143 | p = kmalloc(event->private_data_len, GFP_ATOMIC); | ||
144 | if (!p) | ||
145 | return -ENOMEM; | ||
146 | memcpy(p, event->private_data, event->private_data_len); | ||
147 | event->private_data = p; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Release a reference on cm_id. If the last reference is being removed | ||
153 | * and iw_destroy_cm_id is waiting, wake up the waiting thread. | ||
154 | */ | ||
155 | static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv) | ||
156 | { | ||
157 | int ret = 0; | ||
158 | |||
159 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); | ||
160 | if (atomic_dec_and_test(&cm_id_priv->refcount)) { | ||
161 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | ||
162 | if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) { | ||
163 | BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING); | ||
164 | BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, | ||
165 | &cm_id_priv->flags)); | ||
166 | ret = 1; | ||
167 | } | ||
168 | complete(&cm_id_priv->destroy_comp); | ||
169 | } | ||
170 | |||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static void add_ref(struct iw_cm_id *cm_id) | ||
175 | { | ||
176 | struct iwcm_id_private *cm_id_priv; | ||
177 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
178 | atomic_inc(&cm_id_priv->refcount); | ||
179 | } | ||
180 | |||
181 | static void rem_ref(struct iw_cm_id *cm_id) | ||
182 | { | ||
183 | struct iwcm_id_private *cm_id_priv; | ||
184 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
185 | iwcm_deref_id(cm_id_priv); | ||
186 | } | ||
187 | |||
188 | static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event); | ||
189 | |||
190 | struct iw_cm_id *iw_create_cm_id(struct ib_device *device, | ||
191 | iw_cm_handler cm_handler, | ||
192 | void *context) | ||
193 | { | ||
194 | struct iwcm_id_private *cm_id_priv; | ||
195 | |||
196 | cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL); | ||
197 | if (!cm_id_priv) | ||
198 | return ERR_PTR(-ENOMEM); | ||
199 | |||
200 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
201 | cm_id_priv->id.device = device; | ||
202 | cm_id_priv->id.cm_handler = cm_handler; | ||
203 | cm_id_priv->id.context = context; | ||
204 | cm_id_priv->id.event_handler = cm_event_handler; | ||
205 | cm_id_priv->id.add_ref = add_ref; | ||
206 | cm_id_priv->id.rem_ref = rem_ref; | ||
207 | spin_lock_init(&cm_id_priv->lock); | ||
208 | atomic_set(&cm_id_priv->refcount, 1); | ||
209 | init_waitqueue_head(&cm_id_priv->connect_wait); | ||
210 | init_completion(&cm_id_priv->destroy_comp); | ||
211 | INIT_LIST_HEAD(&cm_id_priv->work_list); | ||
212 | INIT_LIST_HEAD(&cm_id_priv->work_free_list); | ||
213 | |||
214 | return &cm_id_priv->id; | ||
215 | } | ||
216 | EXPORT_SYMBOL(iw_create_cm_id); | ||
217 | |||
218 | |||
219 | static int iwcm_modify_qp_err(struct ib_qp *qp) | ||
220 | { | ||
221 | struct ib_qp_attr qp_attr; | ||
222 | |||
223 | if (!qp) | ||
224 | return -EINVAL; | ||
225 | |||
226 | qp_attr.qp_state = IB_QPS_ERR; | ||
227 | return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * This is really the RDMAC CLOSING state. It is most similar to the | ||
232 | * IB SQD QP state. | ||
233 | */ | ||
234 | static int iwcm_modify_qp_sqd(struct ib_qp *qp) | ||
235 | { | ||
236 | struct ib_qp_attr qp_attr; | ||
237 | |||
238 | BUG_ON(qp == NULL); | ||
239 | qp_attr.qp_state = IB_QPS_SQD; | ||
240 | return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * CM_ID <-- CLOSING | ||
245 | * | ||
246 | * Block if a passive or active connection is currently being processed. Then | ||
247 | * process the event as follows: | ||
248 | * - If we are ESTABLISHED, move to CLOSING and modify the QP state | ||
249 | * based on the abrupt flag | ||
250 | * - If the connection is already in the CLOSING or IDLE state, the peer is | ||
251 | * disconnecting concurrently with us and we've already seen the | ||
252 | * DISCONNECT event -- ignore the request and return 0 | ||
253 | * - Disconnect on a listening endpoint returns -EINVAL | ||
254 | */ | ||
255 | int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) | ||
256 | { | ||
257 | struct iwcm_id_private *cm_id_priv; | ||
258 | unsigned long flags; | ||
259 | int ret = 0; | ||
260 | struct ib_qp *qp = NULL; | ||
261 | |||
262 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
263 | /* Wait if we're currently in a connect or accept downcall */ | ||
264 | wait_event(cm_id_priv->connect_wait, | ||
265 | !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); | ||
266 | |||
267 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
268 | switch (cm_id_priv->state) { | ||
269 | case IW_CM_STATE_ESTABLISHED: | ||
270 | cm_id_priv->state = IW_CM_STATE_CLOSING; | ||
271 | |||
272 | /* QP could be NULL for a user-mode client */ | ||
273 | if (cm_id_priv->qp) | ||
274 | qp = cm_id_priv->qp; | ||
275 | else | ||
276 | ret = -EINVAL; | ||
277 | break; | ||
278 | case IW_CM_STATE_LISTEN: | ||
279 | ret = -EINVAL; | ||
280 | break; | ||
281 | case IW_CM_STATE_CLOSING: | ||
282 | /* remote peer closed first */ | ||
283 | case IW_CM_STATE_IDLE: | ||
284 | /* accept or connect returned !0 */ | ||
285 | break; | ||
286 | case IW_CM_STATE_CONN_RECV: | ||
287 | /* | ||
288 | * App called disconnect before/without calling accept after | ||
289 | * connect_request event delivered. | ||
290 | */ | ||
291 | break; | ||
292 | case IW_CM_STATE_CONN_SENT: | ||
293 | /* Can only get here if wait above fails */ | ||
294 | default: | ||
295 | BUG(); | ||
296 | } | ||
297 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
298 | |||
299 | if (qp) { | ||
300 | if (abrupt) | ||
301 | ret = iwcm_modify_qp_err(qp); | ||
302 | else | ||
303 | ret = iwcm_modify_qp_sqd(qp); | ||
304 | |||
305 | /* | ||
306 | * If both sides are disconnecting the QP could | ||
307 | * already be in ERR or SQD states | ||
308 | */ | ||
309 | ret = 0; | ||
310 | } | ||
311 | |||
312 | return ret; | ||
313 | } | ||
314 | EXPORT_SYMBOL(iw_cm_disconnect); | ||
315 | |||
316 | /* | ||
317 | * CM_ID <-- DESTROYING | ||
318 | * | ||
319 | * Clean up all resources associated with the connection and release | ||
320 | * the initial reference taken by iw_create_cm_id. | ||
321 | */ | ||
322 | static void destroy_cm_id(struct iw_cm_id *cm_id) | ||
323 | { | ||
324 | struct iwcm_id_private *cm_id_priv; | ||
325 | unsigned long flags; | ||
326 | int ret; | ||
327 | |||
328 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
329 | /* | ||
330 | * Wait if we're currently in a connect or accept downcall. A | ||
331 | * listening endpoint should never block here. | ||
332 | */ | ||
333 | wait_event(cm_id_priv->connect_wait, | ||
334 | !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); | ||
335 | |||
336 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
337 | switch (cm_id_priv->state) { | ||
338 | case IW_CM_STATE_LISTEN: | ||
339 | cm_id_priv->state = IW_CM_STATE_DESTROYING; | ||
340 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
341 | /* destroy the listening endpoint */ | ||
342 | ret = cm_id->device->iwcm->destroy_listen(cm_id); | ||
343 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
344 | break; | ||
345 | case IW_CM_STATE_ESTABLISHED: | ||
346 | cm_id_priv->state = IW_CM_STATE_DESTROYING; | ||
347 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
348 | /* Abrupt close of the connection */ | ||
349 | (void)iwcm_modify_qp_err(cm_id_priv->qp); | ||
350 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
351 | break; | ||
352 | case IW_CM_STATE_IDLE: | ||
353 | case IW_CM_STATE_CLOSING: | ||
354 | cm_id_priv->state = IW_CM_STATE_DESTROYING; | ||
355 | break; | ||
356 | case IW_CM_STATE_CONN_RECV: | ||
357 | /* | ||
358 | * App called destroy before/without calling accept after | ||
359 | * receiving connection request event notification. | ||
360 | */ | ||
361 | cm_id_priv->state = IW_CM_STATE_DESTROYING; | ||
362 | break; | ||
363 | case IW_CM_STATE_CONN_SENT: | ||
364 | case IW_CM_STATE_DESTROYING: | ||
365 | default: | ||
366 | BUG(); | ||
367 | break; | ||
368 | } | ||
369 | if (cm_id_priv->qp) { | ||
370 | cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); | ||
371 | cm_id_priv->qp = NULL; | ||
372 | } | ||
373 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
374 | |||
375 | (void)iwcm_deref_id(cm_id_priv); | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * This function is only called by the application thread and cannot | ||
380 | * be called by the event thread. The function will wait for all | ||
381 | * references to be released on the cm_id and then kfree the cm_id | ||
382 | * object. | ||
383 | */ | ||
384 | void iw_destroy_cm_id(struct iw_cm_id *cm_id) | ||
385 | { | ||
386 | struct iwcm_id_private *cm_id_priv; | ||
387 | |||
388 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
389 | BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)); | ||
390 | |||
391 | destroy_cm_id(cm_id); | ||
392 | |||
393 | wait_for_completion(&cm_id_priv->destroy_comp); | ||
394 | |||
395 | dealloc_work_entries(cm_id_priv); | ||
396 | |||
397 | kfree(cm_id_priv); | ||
398 | } | ||
399 | EXPORT_SYMBOL(iw_destroy_cm_id); | ||
400 | |||
401 | /* | ||
402 | * CM_ID <-- LISTEN | ||
403 | * | ||
404 | * Start listening for connect requests. Generates one CONNECT_REQUEST | ||
405 | * event for each inbound connect request. | ||
406 | */ | ||
407 | int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) | ||
408 | { | ||
409 | struct iwcm_id_private *cm_id_priv; | ||
410 | unsigned long flags; | ||
411 | int ret = 0; | ||
412 | |||
413 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
414 | |||
415 | ret = alloc_work_entries(cm_id_priv, backlog); | ||
416 | if (ret) | ||
417 | return ret; | ||
418 | |||
419 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
420 | switch (cm_id_priv->state) { | ||
421 | case IW_CM_STATE_IDLE: | ||
422 | cm_id_priv->state = IW_CM_STATE_LISTEN; | ||
423 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
424 | ret = cm_id->device->iwcm->create_listen(cm_id, backlog); | ||
425 | if (ret) | ||
426 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
427 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
428 | break; | ||
429 | default: | ||
430 | ret = -EINVAL; | ||
431 | } | ||
432 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
433 | |||
434 | return ret; | ||
435 | } | ||
436 | EXPORT_SYMBOL(iw_cm_listen); | ||
437 | |||
438 | /* | ||
439 | * CM_ID <-- IDLE | ||
440 | * | ||
441 | * Rejects an inbound connection request. No events are generated. | ||
442 | */ | ||
443 | int iw_cm_reject(struct iw_cm_id *cm_id, | ||
444 | const void *private_data, | ||
445 | u8 private_data_len) | ||
446 | { | ||
447 | struct iwcm_id_private *cm_id_priv; | ||
448 | unsigned long flags; | ||
449 | int ret; | ||
450 | |||
451 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
452 | set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
453 | |||
454 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
455 | if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) { | ||
456 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
457 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
458 | wake_up_all(&cm_id_priv->connect_wait); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
462 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
463 | |||
464 | ret = cm_id->device->iwcm->reject(cm_id, private_data, | ||
465 | private_data_len); | ||
466 | |||
467 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
468 | wake_up_all(&cm_id_priv->connect_wait); | ||
469 | |||
470 | return ret; | ||
471 | } | ||
472 | EXPORT_SYMBOL(iw_cm_reject); | ||
473 | |||
474 | /* | ||
475 | * CM_ID <-- ESTABLISHED | ||
476 | * | ||
477 | * Accepts an inbound connection request and generates an ESTABLISHED | ||
478 | * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block | ||
479 | * until the ESTABLISHED event is received from the provider. | ||
480 | */ | ||
481 | int iw_cm_accept(struct iw_cm_id *cm_id, | ||
482 | struct iw_cm_conn_param *iw_param) | ||
483 | { | ||
484 | struct iwcm_id_private *cm_id_priv; | ||
485 | struct ib_qp *qp; | ||
486 | unsigned long flags; | ||
487 | int ret; | ||
488 | |||
489 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
490 | set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
491 | |||
492 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
493 | if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) { | ||
494 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
495 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
496 | wake_up_all(&cm_id_priv->connect_wait); | ||
497 | return -EINVAL; | ||
498 | } | ||
499 | /* Get the ib_qp given the QPN */ | ||
500 | qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); | ||
501 | if (!qp) { | ||
502 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
503 | return -EINVAL; | ||
504 | } | ||
505 | cm_id->device->iwcm->add_ref(qp); | ||
506 | cm_id_priv->qp = qp; | ||
507 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
508 | |||
509 | ret = cm_id->device->iwcm->accept(cm_id, iw_param); | ||
510 | if (ret) { | ||
511 | /* An error on accept precludes provider events */ | ||
512 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV); | ||
513 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
514 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
515 | if (cm_id_priv->qp) { | ||
516 | cm_id->device->iwcm->rem_ref(qp); | ||
517 | cm_id_priv->qp = NULL; | ||
518 | } | ||
519 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
520 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
521 | wake_up_all(&cm_id_priv->connect_wait); | ||
522 | } | ||
523 | |||
524 | return ret; | ||
525 | } | ||
526 | EXPORT_SYMBOL(iw_cm_accept); | ||
527 | |||
528 | /* | ||
529 | * Active Side: CM_ID <-- CONN_SENT | ||
530 | * | ||
531 | * If successful, results in the generation of a CONNECT_REPLY | ||
532 | * event. iw_cm_disconnect and iw_destroy_cm_id will block until the | ||
533 | * CONNECT_REPLY event is received from the provider. | ||
534 | */ | ||
535 | int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) | ||
536 | { | ||
537 | struct iwcm_id_private *cm_id_priv; | ||
538 | int ret = 0; | ||
539 | unsigned long flags; | ||
540 | struct ib_qp *qp; | ||
541 | |||
542 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
543 | |||
544 | ret = alloc_work_entries(cm_id_priv, 4); | ||
545 | if (ret) | ||
546 | return ret; | ||
547 | |||
548 | set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
549 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
550 | |||
551 | if (cm_id_priv->state != IW_CM_STATE_IDLE) { | ||
552 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
553 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
554 | wake_up_all(&cm_id_priv->connect_wait); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | |||
558 | /* Get the ib_qp given the QPN */ | ||
559 | qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); | ||
560 | if (!qp) { | ||
561 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
562 | return -EINVAL; | ||
563 | } | ||
564 | cm_id->device->iwcm->add_ref(qp); | ||
565 | cm_id_priv->qp = qp; | ||
566 | cm_id_priv->state = IW_CM_STATE_CONN_SENT; | ||
567 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
568 | |||
569 | ret = cm_id->device->iwcm->connect(cm_id, iw_param); | ||
570 | if (ret) { | ||
571 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
572 | if (cm_id_priv->qp) { | ||
573 | cm_id->device->iwcm->rem_ref(qp); | ||
574 | cm_id_priv->qp = NULL; | ||
575 | } | ||
576 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
577 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); | ||
578 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
579 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
580 | wake_up_all(&cm_id_priv->connect_wait); | ||
581 | } | ||
582 | |||
583 | return ret; | ||
584 | } | ||
585 | EXPORT_SYMBOL(iw_cm_connect); | ||
586 | |||
587 | /* | ||
588 | * Passive Side: new CM_ID <-- CONN_RECV | ||
589 | * | ||
590 | * Handles an inbound connect request. The function creates a new | ||
591 | * iw_cm_id to represent the new connection and inherits the client | ||
592 | * callback function and other attributes from the listening parent. | ||
593 | * | ||
594 | * The work item contains a pointer to the listen_cm_id and the event. The | ||
595 | * listen_cm_id contains the client cm_handler, context and | ||
596 | * device. These are copied when the cm_id is cloned. The event | ||
597 | * contains the new four tuple. | ||
598 | * | ||
599 | * An error on the child should not affect the parent, so this | ||
600 | * function does not return a value. | ||
601 | */ | ||
602 | static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, | ||
603 | struct iw_cm_event *iw_event) | ||
604 | { | ||
605 | unsigned long flags; | ||
606 | struct iw_cm_id *cm_id; | ||
607 | struct iwcm_id_private *cm_id_priv; | ||
608 | int ret; | ||
609 | |||
610 | /* | ||
611 | * The provider should never generate a connection request | ||
612 | * event with a bad status. | ||
613 | */ | ||
614 | BUG_ON(iw_event->status); | ||
615 | |||
616 | /* | ||
617 | * We could be destroying the listening id. If so, ignore this | ||
618 | * upcall. | ||
619 | */ | ||
620 | spin_lock_irqsave(&listen_id_priv->lock, flags); | ||
621 | if (listen_id_priv->state != IW_CM_STATE_LISTEN) { | ||
622 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
623 | return; | ||
624 | } | ||
625 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
626 | |||
627 | cm_id = iw_create_cm_id(listen_id_priv->id.device, | ||
628 | listen_id_priv->id.cm_handler, | ||
629 | listen_id_priv->id.context); | ||
630 | /* If the cm_id could not be created, ignore the request */ | ||
631 | if (IS_ERR(cm_id)) | ||
632 | return; | ||
633 | |||
634 | cm_id->provider_data = iw_event->provider_data; | ||
635 | cm_id->local_addr = iw_event->local_addr; | ||
636 | cm_id->remote_addr = iw_event->remote_addr; | ||
637 | |||
638 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
639 | cm_id_priv->state = IW_CM_STATE_CONN_RECV; | ||
640 | |||
641 | ret = alloc_work_entries(cm_id_priv, 3); | ||
642 | if (ret) { | ||
643 | iw_cm_reject(cm_id, NULL, 0); | ||
644 | iw_destroy_cm_id(cm_id); | ||
645 | return; | ||
646 | } | ||
647 | |||
648 | /* Call the client CM handler */ | ||
649 | ret = cm_id->cm_handler(cm_id, iw_event); | ||
650 | if (ret) { | ||
651 | set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | ||
652 | destroy_cm_id(cm_id); | ||
653 | if (atomic_read(&cm_id_priv->refcount)==0) | ||
654 | kfree(cm_id); | ||
655 | } | ||
656 | |||
657 | if (iw_event->private_data_len) | ||
658 | kfree(iw_event->private_data); | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * Passive Side: CM_ID <-- ESTABLISHED | ||
663 | * | ||
664 | * The provider generated an ESTABLISHED event which means that | ||
665 | * the MPA negotiation has completed successfully and we are now in MPA | ||
666 | * FPDU mode. | ||
667 | * | ||
668 | * This event can only be received in the CONN_RECV state. If the | ||
669 | * remote peer closed, the ESTABLISHED event would be received followed | ||
670 | * by the CLOSE event. If the app closes, it will block until we wake | ||
671 | * it up after processing this event. | ||
672 | */ | ||
673 | static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv, | ||
674 | struct iw_cm_event *iw_event) | ||
675 | { | ||
676 | unsigned long flags; | ||
677 | int ret = 0; | ||
678 | |||
679 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
680 | |||
681 | /* | ||
682 | * We clear the CONNECT_WAIT bit here to allow the callback | ||
683 | * function to call iw_cm_disconnect. Calling iw_destroy_cm_id | ||
684 | * from a callback handler is not allowed. | ||
685 | */ | ||
686 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
687 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV); | ||
688 | cm_id_priv->state = IW_CM_STATE_ESTABLISHED; | ||
689 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
690 | ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); | ||
691 | wake_up_all(&cm_id_priv->connect_wait); | ||
692 | |||
693 | return ret; | ||
694 | } | ||
695 | |||
696 | /* | ||
697 | * Active Side: CM_ID <-- ESTABLISHED | ||
698 | * | ||
699 | * The app has called connect and is waiting for the established event to | ||
700 | * post its requests to the server. This event will wake up anyone | ||
701 | * blocked in iw_cm_disconnect or iw_destroy_cm_id. | ||
702 | */ | ||
703 | static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, | ||
704 | struct iw_cm_event *iw_event) | ||
705 | { | ||
706 | unsigned long flags; | ||
707 | int ret = 0; | ||
708 | |||
709 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
710 | /* | ||
711 | * Clear the connect wait bit so a callback function calling | ||
712 | * iw_cm_disconnect will not wait and deadlock this thread | ||
713 | */ | ||
714 | clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); | ||
715 | BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); | ||
716 | if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { | ||
717 | cm_id_priv->id.local_addr = iw_event->local_addr; | ||
718 | cm_id_priv->id.remote_addr = iw_event->remote_addr; | ||
719 | cm_id_priv->state = IW_CM_STATE_ESTABLISHED; | ||
720 | } else { | ||
721 | /* REJECTED or RESET */ | ||
722 | cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); | ||
723 | cm_id_priv->qp = NULL; | ||
724 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
725 | } | ||
726 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
727 | ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); | ||
728 | |||
729 | if (iw_event->private_data_len) | ||
730 | kfree(iw_event->private_data); | ||
731 | |||
732 | /* Wake up waiters on connect complete */ | ||
733 | wake_up_all(&cm_id_priv->connect_wait); | ||
734 | |||
735 | return ret; | ||
736 | } | ||
737 | |||
738 | /* | ||
739 | * CM_ID <-- CLOSING | ||
740 | * | ||
741 | * If in the ESTABLISHED state, move to CLOSING. | ||
742 | */ | ||
743 | static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv, | ||
744 | struct iw_cm_event *iw_event) | ||
745 | { | ||
746 | unsigned long flags; | ||
747 | |||
748 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
749 | if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED) | ||
750 | cm_id_priv->state = IW_CM_STATE_CLOSING; | ||
751 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
752 | } | ||
753 | |||
754 | /* | ||
755 | * CM_ID <-- IDLE | ||
756 | * | ||
757 | * If in the ESTABLISHED or CLOSING states, the QP will have been | ||
758 | * moved by the provider to the ERR state. Disassociate the CM_ID from | ||
759 | * the QP, move to IDLE, and remove the 'connected' reference. | ||
760 | * | ||
761 | * If in some other state, the cm_id was destroyed asynchronously. | ||
762 | * This is the last reference that will result in waking up | ||
763 | * the app thread blocked in iw_destroy_cm_id. | ||
764 | */ | ||
765 | static int cm_close_handler(struct iwcm_id_private *cm_id_priv, | ||
766 | struct iw_cm_event *iw_event) | ||
767 | { | ||
768 | unsigned long flags; | ||
769 | int ret = 0; | ||
770 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
771 | |||
772 | if (cm_id_priv->qp) { | ||
773 | cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); | ||
774 | cm_id_priv->qp = NULL; | ||
775 | } | ||
776 | switch (cm_id_priv->state) { | ||
777 | case IW_CM_STATE_ESTABLISHED: | ||
778 | case IW_CM_STATE_CLOSING: | ||
779 | cm_id_priv->state = IW_CM_STATE_IDLE; | ||
780 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
781 | ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); | ||
782 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
783 | break; | ||
784 | case IW_CM_STATE_DESTROYING: | ||
785 | break; | ||
786 | default: | ||
787 | BUG(); | ||
788 | } | ||
789 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
790 | |||
791 | return ret; | ||
792 | } | ||
793 | |||
794 | static int process_event(struct iwcm_id_private *cm_id_priv, | ||
795 | struct iw_cm_event *iw_event) | ||
796 | { | ||
797 | int ret = 0; | ||
798 | |||
799 | switch (iw_event->event) { | ||
800 | case IW_CM_EVENT_CONNECT_REQUEST: | ||
801 | cm_conn_req_handler(cm_id_priv, iw_event); | ||
802 | break; | ||
803 | case IW_CM_EVENT_CONNECT_REPLY: | ||
804 | ret = cm_conn_rep_handler(cm_id_priv, iw_event); | ||
805 | break; | ||
806 | case IW_CM_EVENT_ESTABLISHED: | ||
807 | ret = cm_conn_est_handler(cm_id_priv, iw_event); | ||
808 | break; | ||
809 | case IW_CM_EVENT_DISCONNECT: | ||
810 | cm_disconnect_handler(cm_id_priv, iw_event); | ||
811 | break; | ||
812 | case IW_CM_EVENT_CLOSE: | ||
813 | ret = cm_close_handler(cm_id_priv, iw_event); | ||
814 | break; | ||
815 | default: | ||
816 | BUG(); | ||
817 | } | ||
818 | |||
819 | return ret; | ||
820 | } | ||
821 | |||
822 | /* | ||
823 | * Process events on the work_list for the cm_id. If the callback | ||
824 | * function requests that the cm_id be deleted, a flag is set in the | ||
825 | * cm_id flags to indicate that when the last reference is | ||
826 | * removed, the cm_id is to be destroyed. This is necessary to | ||
827 | * distinguish between an object that will be destroyed by the app | ||
828 | * thread asleep on destroy_comp vs. an object destroyed | ||
829 | * here synchronously when the last reference is removed. | ||
830 | */ | ||
831 | static void cm_work_handler(void *arg) | ||
832 | { | ||
833 | struct iwcm_work *work = arg, lwork; | ||
834 | struct iwcm_id_private *cm_id_priv = work->cm_id; | ||
835 | unsigned long flags; | ||
836 | int empty; | ||
837 | int ret = 0; | ||
838 | |||
839 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
840 | empty = list_empty(&cm_id_priv->work_list); | ||
841 | while (!empty) { | ||
842 | work = list_entry(cm_id_priv->work_list.next, | ||
843 | struct iwcm_work, list); | ||
844 | list_del_init(&work->list); | ||
845 | empty = list_empty(&cm_id_priv->work_list); | ||
846 | lwork = *work; | ||
847 | put_work(work); | ||
848 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
849 | |||
850 | ret = process_event(cm_id_priv, &lwork.event); | ||
851 | if (ret) { | ||
852 | set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | ||
853 | destroy_cm_id(&cm_id_priv->id); | ||
854 | } | ||
855 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); | ||
856 | if (iwcm_deref_id(cm_id_priv)) | ||
857 | return; | ||
858 | |||
859 | if (atomic_read(&cm_id_priv->refcount)==0 && | ||
860 | test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { | ||
861 | dealloc_work_entries(cm_id_priv); | ||
862 | kfree(cm_id_priv); | ||
863 | return; | ||
864 | } | ||
865 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
866 | } | ||
867 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
868 | } | ||
869 | |||
870 | /* | ||
871 | * This function is called in interrupt context. Schedule events on | ||
872 | * the iwcm_wq thread to allow callback functions to downcall into | ||
873 | * the CM and/or block. Events are queued to a per-CM_ID | ||
874 | * work_list. If this is the first event on the work_list, the work | ||
875 | * element is also queued on the iwcm_wq thread. | ||
876 | * | ||
877 | * Each event holds a reference on the cm_id. Until the last posted | ||
878 | * event has been delivered and processed, the cm_id cannot be | ||
879 | * deleted. | ||
880 | * | ||
881 | * Returns: | ||
882 | * 0 - the event was handled. | ||
883 | * -ENOMEM - the event was not handled due to lack of resources. | ||
884 | */ | ||
885 | static int cm_event_handler(struct iw_cm_id *cm_id, | ||
886 | struct iw_cm_event *iw_event) | ||
887 | { | ||
888 | struct iwcm_work *work; | ||
889 | struct iwcm_id_private *cm_id_priv; | ||
890 | unsigned long flags; | ||
891 | int ret = 0; | ||
892 | |||
893 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
894 | |||
895 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
896 | work = get_work(cm_id_priv); | ||
897 | if (!work) { | ||
898 | ret = -ENOMEM; | ||
899 | goto out; | ||
900 | } | ||
901 | |||
902 | INIT_WORK(&work->work, cm_work_handler, work); | ||
903 | work->cm_id = cm_id_priv; | ||
904 | work->event = *iw_event; | ||
905 | |||
906 | if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST || | ||
907 | work->event.event == IW_CM_EVENT_CONNECT_REPLY) && | ||
908 | work->event.private_data_len) { | ||
909 | ret = copy_private_data(cm_id_priv, &work->event); | ||
910 | if (ret) { | ||
911 | put_work(work); | ||
912 | goto out; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | atomic_inc(&cm_id_priv->refcount); | ||
917 | if (list_empty(&cm_id_priv->work_list)) { | ||
918 | list_add_tail(&work->list, &cm_id_priv->work_list); | ||
919 | queue_work(iwcm_wq, &work->work); | ||
920 | } else | ||
921 | list_add_tail(&work->list, &cm_id_priv->work_list); | ||
922 | out: | ||
923 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
924 | return ret; | ||
925 | } | ||
926 | |||
927 | static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv, | ||
928 | struct ib_qp_attr *qp_attr, | ||
929 | int *qp_attr_mask) | ||
930 | { | ||
931 | unsigned long flags; | ||
932 | int ret; | ||
933 | |||
934 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
935 | switch (cm_id_priv->state) { | ||
936 | case IW_CM_STATE_IDLE: | ||
937 | case IW_CM_STATE_CONN_SENT: | ||
938 | case IW_CM_STATE_CONN_RECV: | ||
939 | case IW_CM_STATE_ESTABLISHED: | ||
940 | *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; | ||
941 | qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | | ||
942 | IB_ACCESS_REMOTE_WRITE| | ||
943 | IB_ACCESS_REMOTE_READ; | ||
944 | ret = 0; | ||
945 | break; | ||
946 | default: | ||
947 | ret = -EINVAL; | ||
948 | break; | ||
949 | } | ||
950 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
951 | return ret; | ||
952 | } | ||
953 | |||
954 | static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv, | ||
955 | struct ib_qp_attr *qp_attr, | ||
956 | int *qp_attr_mask) | ||
957 | { | ||
958 | unsigned long flags; | ||
959 | int ret; | ||
960 | |||
961 | spin_lock_irqsave(&cm_id_priv->lock, flags); | ||
962 | switch (cm_id_priv->state) { | ||
963 | case IW_CM_STATE_IDLE: | ||
964 | case IW_CM_STATE_CONN_SENT: | ||
965 | case IW_CM_STATE_CONN_RECV: | ||
966 | case IW_CM_STATE_ESTABLISHED: | ||
967 | *qp_attr_mask = 0; | ||
968 | ret = 0; | ||
969 | break; | ||
970 | default: | ||
971 | ret = -EINVAL; | ||
972 | break; | ||
973 | } | ||
974 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | ||
975 | return ret; | ||
976 | } | ||
977 | |||
978 | int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, | ||
979 | struct ib_qp_attr *qp_attr, | ||
980 | int *qp_attr_mask) | ||
981 | { | ||
982 | struct iwcm_id_private *cm_id_priv; | ||
983 | int ret; | ||
984 | |||
985 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | ||
986 | switch (qp_attr->qp_state) { | ||
987 | case IB_QPS_INIT: | ||
988 | case IB_QPS_RTR: | ||
989 | ret = iwcm_init_qp_init_attr(cm_id_priv, | ||
990 | qp_attr, qp_attr_mask); | ||
991 | break; | ||
992 | case IB_QPS_RTS: | ||
993 | ret = iwcm_init_qp_rts_attr(cm_id_priv, | ||
994 | qp_attr, qp_attr_mask); | ||
995 | break; | ||
996 | default: | ||
997 | ret = -EINVAL; | ||
998 | break; | ||
999 | } | ||
1000 | return ret; | ||
1001 | } | ||
1002 | EXPORT_SYMBOL(iw_cm_init_qp_attr); | ||
1003 | |||
1004 | static int __init iw_cm_init(void) | ||
1005 | { | ||
1006 | iwcm_wq = create_singlethread_workqueue("iw_cm_wq"); | ||
1007 | if (!iwcm_wq) | ||
1008 | return -ENOMEM; | ||
1009 | |||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
1013 | static void __exit iw_cm_cleanup(void) | ||
1014 | { | ||
1015 | destroy_workqueue(iwcm_wq); | ||
1016 | } | ||
1017 | |||
1018 | module_init(iw_cm_init); | ||
1019 | module_exit(iw_cm_cleanup); | ||
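For orientation, here is a minimal, hypothetical sketch of how a kernel consumer (the RDMA CM is the in-tree user) might drive the listen side of the interface added above. Only the iw_cm_* entry points, the iw_cm_event fields and iw_cm_conn_param.qpn appear in the patch; the handler name, backlog value and QPN wiring are illustrative assumptions.

/* Sketch only: illustrative consumer of the new iWARP CM listen path. */
#include <linux/err.h>
#include <linux/string.h>
#include <rdma/iw_cm.h>

static int example_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	struct iw_cm_conn_param param;

	switch (event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		/* cm_id is the new id cloned from the listening endpoint */
		memset(&param, 0, sizeof param);
		param.qpn = 0;	/* would be the consumer's iWARP QP number */
		return iw_cm_accept(cm_id, &param);	/* CM_ID <-- ESTABLISHED */
	case IW_CM_EVENT_ESTABLISHED:
	case IW_CM_EVENT_DISCONNECT:
	case IW_CM_EVENT_CLOSE:
		return 0;	/* a non-zero return destroys the cm_id */
	default:
		return 0;
	}
}

static int example_listen(struct ib_device *device)
{
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, example_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	/* cm_id->local_addr would be set to the listening 4-tuple here */
	ret = iw_cm_listen(cm_id, 8);	/* pre-allocates backlog work elements */
	if (ret)
		iw_destroy_cm_id(cm_id);
	return ret;
}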
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h new file mode 100644 index 000000000000..3f6cc82564c8 --- /dev/null +++ b/drivers/infiniband/core/iwcm.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005 Network Appliance, Inc. All rights reserved. | ||
3 | * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #ifndef IWCM_H | ||
34 | #define IWCM_H | ||
35 | |||
36 | enum iw_cm_state { | ||
37 | IW_CM_STATE_IDLE, /* unbound, inactive */ | ||
38 | IW_CM_STATE_LISTEN, /* listen waiting for connect */ | ||
39 | IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */ | ||
40 | IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */ | ||
41 | IW_CM_STATE_ESTABLISHED, /* established */ | ||
42 | IW_CM_STATE_CLOSING, /* disconnect */ | ||
43 | IW_CM_STATE_DESTROYING /* object being deleted */ | ||
44 | }; | ||
45 | |||
46 | struct iwcm_id_private { | ||
47 | struct iw_cm_id id; | ||
48 | enum iw_cm_state state; | ||
49 | unsigned long flags; | ||
50 | struct ib_qp *qp; | ||
51 | struct completion destroy_comp; | ||
52 | wait_queue_head_t connect_wait; | ||
53 | struct list_head work_list; | ||
54 | spinlock_t lock; | ||
55 | atomic_t refcount; | ||
56 | struct list_head work_free_list; | ||
57 | }; | ||
58 | |||
59 | #define IWCM_F_CALLBACK_DESTROY 1 | ||
60 | #define IWCM_F_CONNECT_WAIT 2 | ||
61 | |||
62 | #endif /* IWCM_H */ | ||
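To complement the listen-side sketch above, a similarly hypothetical sketch of the active side follows; the qpn initialiser and the address setup are placeholders, and only the iw_cm_* calls and the CONN_SENT/CONNECT_REPLY behaviour come from iwcm.c above.

/* Sketch only: illustrative active-side connect using the new iWARP CM. */
#include <linux/err.h>
#include <rdma/iw_cm.h>

static int example_active_handler(struct iw_cm_id *cm_id,
				  struct iw_cm_event *event)
{
	if (event->event == IW_CM_EVENT_CONNECT_REPLY)
		return event->status;	/* non-zero status tears the cm_id down */
	return 0;
}

static int example_connect(struct ib_device *device, u32 qpn)
{
	struct iw_cm_conn_param param = { .qpn = qpn };
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, example_active_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	/* cm_id->local_addr and cm_id->remote_addr would be filled in here */
	ret = iw_cm_connect(cm_id, &param);	/* CM_ID <-- CONN_SENT */
	if (ret)
		iw_destroy_cm_id(cm_id);
	return ret;
}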
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1c3cfbbe6a97..082f03c158f0 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, | |||
1246 | int i; | 1246 | int i; |
1247 | 1247 | ||
1248 | for (i = 0; i < MAX_MGMT_OUI; i++) | 1248 | for (i = 0; i < MAX_MGMT_OUI; i++) |
1249 | /* Is there matching OUI for this vendor class ? */ | 1249 | /* Is there matching OUI for this vendor class ? */ |
1250 | if (!memcmp(vendor_class->oui[i], oui, 3)) | 1250 | if (!memcmp(vendor_class->oui[i], oui, 3)) |
1251 | return i; | 1251 | return i; |
1252 | 1252 | ||
1253 | return -1; | 1253 | return -1; |
@@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) | |||
2237 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, | 2237 | list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, |
2238 | &mad_agent_priv->send_list, agent_list) { | 2238 | &mad_agent_priv->send_list, agent_list) { |
2239 | if (mad_send_wr->status == IB_WC_SUCCESS) { | 2239 | if (mad_send_wr->status == IB_WC_SUCCESS) { |
2240 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; | 2240 | mad_send_wr->status = IB_WC_WR_FLUSH_ERR; |
2241 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); | 2241 | mad_send_wr->refcount -= (mad_send_wr->timeout > 0); |
2242 | } | 2242 | } |
2243 | } | 2243 | } |
@@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | |||
2528 | } | 2528 | } |
2529 | } | 2529 | } |
2530 | sg_list.addr = dma_map_single(qp_info->port_priv-> | 2530 | sg_list.addr = dma_map_single(qp_info->port_priv-> |
2531 | device->dma_device, | 2531 | device->dma_device, |
2532 | &mad_priv->grh, | 2532 | &mad_priv->grh, |
2533 | sizeof *mad_priv - | 2533 | sizeof *mad_priv - |
2534 | sizeof mad_priv->header, | 2534 | sizeof mad_priv->header, |
2535 | DMA_FROM_DEVICE); | 2535 | DMA_FROM_DEVICE); |
2536 | pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); | 2536 | pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); |
2537 | recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; | 2537 | recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; |
@@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) | |||
2606 | struct ib_qp *qp; | 2606 | struct ib_qp *qp; |
2607 | 2607 | ||
2608 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | 2608 | attr = kmalloc(sizeof *attr, GFP_KERNEL); |
2609 | if (!attr) { | 2609 | if (!attr) { |
2610 | printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); | 2610 | printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); |
2611 | return -ENOMEM; | 2611 | return -ENOMEM; |
2612 | } | 2612 | } |
@@ -2876,7 +2876,10 @@ static void ib_mad_init_device(struct ib_device *device) | |||
2876 | { | 2876 | { |
2877 | int start, end, i; | 2877 | int start, end, i; |
2878 | 2878 | ||
2879 | if (device->node_type == IB_NODE_SWITCH) { | 2879 | if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) |
2880 | return; | ||
2881 | |||
2882 | if (device->node_type == RDMA_NODE_IB_SWITCH) { | ||
2880 | start = 0; | 2883 | start = 0; |
2881 | end = 0; | 2884 | end = 0; |
2882 | } else { | 2885 | } else { |
@@ -2923,7 +2926,7 @@ static void ib_mad_remove_device(struct ib_device *device) | |||
2923 | { | 2926 | { |
2924 | int i, num_ports, cur_port; | 2927 | int i, num_ports, cur_port; |
2925 | 2928 | ||
2926 | if (device->node_type == IB_NODE_SWITCH) { | 2929 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
2927 | num_ports = 1; | 2930 | num_ports = 1; |
2928 | cur_port = 0; | 2931 | cur_port = 0; |
2929 | } else { | 2932 | } else { |
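The transport check introduced above keeps the MAD layer off iWARP devices; other IB-only clients in this series apply the same guard. A hedged sketch of the pattern, with invented function names:

/* Sketch only: IB-transport guard as an IB-only client's add-device hook
 * might use it. rdma_node_get_transport() and RDMA_TRANSPORT_IB are the
 * helpers this series switches to; the rest is illustrative. */
static void example_add_one(struct ib_device *device)
{
	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;		/* skip RNICs; this service is IB-specific */

	/* ... per-port MAD/SA-style setup for IB devices only ... */
}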
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d147f3bad2ce..1da9adbccaec 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -39,7 +39,6 @@ | |||
39 | 39 | ||
40 | #include <linux/completion.h> | 40 | #include <linux/completion.h> |
41 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
42 | #include <linux/kthread.h> | ||
43 | #include <linux/workqueue.h> | 42 | #include <linux/workqueue.h> |
44 | #include <rdma/ib_mad.h> | 43 | #include <rdma/ib_mad.h> |
45 | #include <rdma/ib_smi.h> | 44 | #include <rdma/ib_smi.h> |
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index ebcd5b181770..1ef79d015a1e 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -33,8 +33,6 @@ | |||
33 | * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $ | 33 | * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $ |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/dma-mapping.h> | ||
37 | |||
38 | #include "mad_priv.h" | 36 | #include "mad_priv.h" |
39 | #include "mad_rmpp.h" | 37 | #include "mad_rmpp.h" |
40 | 38 | ||
@@ -60,6 +58,7 @@ struct mad_rmpp_recv { | |||
60 | int last_ack; | 58 | int last_ack; |
61 | int seg_num; | 59 | int seg_num; |
62 | int newwin; | 60 | int newwin; |
61 | int repwin; | ||
63 | 62 | ||
64 | __be64 tid; | 63 | __be64 tid; |
65 | u32 src_qp; | 64 | u32 src_qp; |
@@ -170,6 +169,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, | |||
170 | return msg; | 169 | return msg; |
171 | } | 170 | } |
172 | 171 | ||
172 | static void ack_ds_ack(struct ib_mad_agent_private *agent, | ||
173 | struct ib_mad_recv_wc *recv_wc) | ||
174 | { | ||
175 | struct ib_mad_send_buf *msg; | ||
176 | struct ib_rmpp_mad *rmpp_mad; | ||
177 | int ret; | ||
178 | |||
179 | msg = alloc_response_msg(&agent->agent, recv_wc); | ||
180 | if (IS_ERR(msg)) | ||
181 | return; | ||
182 | |||
183 | rmpp_mad = msg->mad; | ||
184 | memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); | ||
185 | |||
186 | rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; | ||
187 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); | ||
188 | rmpp_mad->rmpp_hdr.seg_num = 0; | ||
189 | rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1); | ||
190 | |||
191 | ret = ib_post_send_mad(msg, NULL); | ||
192 | if (ret) { | ||
193 | ib_destroy_ah(msg->ah); | ||
194 | ib_free_send_mad(msg); | ||
195 | } | ||
196 | } | ||
197 | |||
173 | void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) | 198 | void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) |
174 | { | 199 | { |
175 | struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad; | 200 | struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad; |
@@ -271,6 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, | |||
271 | rmpp_recv->newwin = 1; | 296 | rmpp_recv->newwin = 1; |
272 | rmpp_recv->seg_num = 1; | 297 | rmpp_recv->seg_num = 1; |
273 | rmpp_recv->last_ack = 0; | 298 | rmpp_recv->last_ack = 0; |
299 | rmpp_recv->repwin = 1; | ||
274 | 300 | ||
275 | mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; | 301 | mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; |
276 | rmpp_recv->tid = mad_hdr->tid; | 302 | rmpp_recv->tid = mad_hdr->tid; |
@@ -365,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent) | |||
365 | static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, | 391 | static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, |
366 | int seg_num) | 392 | int seg_num) |
367 | { | 393 | { |
368 | struct ib_mad_recv_buf *seg_buf; | 394 | struct ib_mad_recv_buf *seg_buf; |
369 | int cur_seg_num; | 395 | int cur_seg_num; |
370 | 396 | ||
371 | list_for_each_entry_reverse(seg_buf, rmpp_list, list) { | 397 | list_for_each_entry_reverse(seg_buf, rmpp_list, list) { |
@@ -591,6 +617,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, | |||
591 | break; | 617 | break; |
592 | } | 618 | } |
593 | 619 | ||
620 | static void process_ds_ack(struct ib_mad_agent_private *agent, | ||
621 | struct ib_mad_recv_wc *mad_recv_wc, int newwin) | ||
622 | { | ||
623 | struct mad_rmpp_recv *rmpp_recv; | ||
624 | |||
625 | rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); | ||
626 | if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE) | ||
627 | rmpp_recv->repwin = newwin; | ||
628 | } | ||
629 | |||
594 | static void process_rmpp_ack(struct ib_mad_agent_private *agent, | 630 | static void process_rmpp_ack(struct ib_mad_agent_private *agent, |
595 | struct ib_mad_recv_wc *mad_recv_wc) | 631 | struct ib_mad_recv_wc *mad_recv_wc) |
596 | { | 632 | { |
@@ -616,8 +652,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, | |||
616 | 652 | ||
617 | spin_lock_irqsave(&agent->lock, flags); | 653 | spin_lock_irqsave(&agent->lock, flags); |
618 | mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); | 654 | mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); |
619 | if (!mad_send_wr) | 655 | if (!mad_send_wr) { |
620 | goto out; /* Unmatched ACK */ | 656 | if (!seg_num) |
657 | process_ds_ack(agent, mad_recv_wc, newwin); | ||
658 | goto out; /* Unmatched or DS RMPP ACK */ | ||
659 | } | ||
660 | |||
661 | if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) && | ||
662 | (mad_send_wr->timeout)) { | ||
663 | spin_unlock_irqrestore(&agent->lock, flags); | ||
664 | ack_ds_ack(agent, mad_recv_wc); | ||
665 | return; /* Repeated ACK for DS RMPP transaction */ | ||
666 | } | ||
621 | 667 | ||
622 | if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || | 668 | if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || |
623 | (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) | 669 | (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) |
@@ -656,6 +702,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, | |||
656 | if (mad_send_wr->refcount == 1) | 702 | if (mad_send_wr->refcount == 1) |
657 | ib_reset_mad_timeout(mad_send_wr, | 703 | ib_reset_mad_timeout(mad_send_wr, |
658 | mad_send_wr->send_buf.timeout_ms); | 704 | mad_send_wr->send_buf.timeout_ms); |
705 | spin_unlock_irqrestore(&agent->lock, flags); | ||
706 | ack_ds_ack(agent, mad_recv_wc); | ||
707 | return; | ||
659 | } else if (mad_send_wr->refcount == 1 && | 708 | } else if (mad_send_wr->refcount == 1 && |
660 | mad_send_wr->seg_num < mad_send_wr->newwin && | 709 | mad_send_wr->seg_num < mad_send_wr->newwin && |
661 | mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { | 710 | mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { |
@@ -772,6 +821,39 @@ out: | |||
772 | return NULL; | 821 | return NULL; |
773 | } | 822 | } |
774 | 823 | ||
824 | static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr) | ||
825 | { | ||
826 | struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv; | ||
827 | struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad; | ||
828 | struct mad_rmpp_recv *rmpp_recv; | ||
829 | struct ib_ah_attr ah_attr; | ||
830 | unsigned long flags; | ||
831 | int newwin = 1; | ||
832 | |||
833 | if (!(mad_hdr->method & IB_MGMT_METHOD_RESP)) | ||
834 | goto out; | ||
835 | |||
836 | spin_lock_irqsave(&agent->lock, flags); | ||
837 | list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { | ||
838 | if (rmpp_recv->tid != mad_hdr->tid || | ||
839 | rmpp_recv->mgmt_class != mad_hdr->mgmt_class || | ||
840 | rmpp_recv->class_version != mad_hdr->class_version || | ||
841 | (rmpp_recv->method & IB_MGMT_METHOD_RESP)) | ||
842 | continue; | ||
843 | |||
844 | if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr)) | ||
845 | continue; | ||
846 | |||
847 | if (rmpp_recv->slid == ah_attr.dlid) { | ||
848 | newwin = rmpp_recv->repwin; | ||
849 | break; | ||
850 | } | ||
851 | } | ||
852 | spin_unlock_irqrestore(&agent->lock, flags); | ||
853 | out: | ||
854 | return newwin; | ||
855 | } | ||
856 | |||
775 | int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) | 857 | int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) |
776 | { | 858 | { |
777 | struct ib_rmpp_mad *rmpp_mad; | 859 | struct ib_rmpp_mad *rmpp_mad; |
@@ -787,7 +869,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
787 | return IB_RMPP_RESULT_INTERNAL; | 869 | return IB_RMPP_RESULT_INTERNAL; |
788 | } | 870 | } |
789 | 871 | ||
790 | mad_send_wr->newwin = 1; | 872 | mad_send_wr->newwin = init_newwin(mad_send_wr); |
791 | 873 | ||
792 | /* We need to wait for the final ACK even if there isn't a response */ | 874 | /* We need to wait for the final ACK even if there isn't a response */ |
793 | mad_send_wr->refcount += (mad_send_wr->timeout == 0); | 875 | mad_send_wr->refcount += (mad_send_wr->timeout == 0); |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index d6b84226bba7..1706d3c7e95e 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. | 3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. |
4 | * Copyright (c) 2006 Intel Corporation. All rights reserved. | ||
4 | * | 5 | * |
5 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
6 | * licenses. You may choose to be licensed under the terms of the GNU | 7 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -75,6 +76,7 @@ struct ib_sa_device { | |||
75 | struct ib_sa_query { | 76 | struct ib_sa_query { |
76 | void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); | 77 | void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); |
77 | void (*release)(struct ib_sa_query *); | 78 | void (*release)(struct ib_sa_query *); |
79 | struct ib_sa_client *client; | ||
78 | struct ib_sa_port *port; | 80 | struct ib_sa_port *port; |
79 | struct ib_mad_send_buf *mad_buf; | 81 | struct ib_mad_send_buf *mad_buf; |
80 | struct ib_sa_sm_ah *sm_ah; | 82 | struct ib_sa_sm_ah *sm_ah; |
@@ -415,6 +417,31 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event | |||
415 | } | 417 | } |
416 | } | 418 | } |
417 | 419 | ||
420 | void ib_sa_register_client(struct ib_sa_client *client) | ||
421 | { | ||
422 | atomic_set(&client->users, 1); | ||
423 | init_completion(&client->comp); | ||
424 | } | ||
425 | EXPORT_SYMBOL(ib_sa_register_client); | ||
426 | |||
427 | static inline void ib_sa_client_get(struct ib_sa_client *client) | ||
428 | { | ||
429 | atomic_inc(&client->users); | ||
430 | } | ||
431 | |||
432 | static inline void ib_sa_client_put(struct ib_sa_client *client) | ||
433 | { | ||
434 | if (atomic_dec_and_test(&client->users)) | ||
435 | complete(&client->comp); | ||
436 | } | ||
437 | |||
438 | void ib_sa_unregister_client(struct ib_sa_client *client) | ||
439 | { | ||
440 | ib_sa_client_put(client); | ||
441 | wait_for_completion(&client->comp); | ||
442 | } | ||
443 | EXPORT_SYMBOL(ib_sa_unregister_client); | ||
444 | |||
418 | /** | 445 | /** |
419 | * ib_sa_cancel_query - try to cancel an SA query | 446 | * ib_sa_cancel_query - try to cancel an SA query |
420 | * @id:ID of query to cancel | 447 | * @id:ID of query to cancel |
@@ -557,6 +584,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) | |||
557 | 584 | ||
558 | /** | 585 | /** |
559 | * ib_sa_path_rec_get - Start a Path get query | 586 | * ib_sa_path_rec_get - Start a Path get query |
587 | * @client:SA client | ||
560 | * @device:device to send query on | 588 | * @device:device to send query on |
561 | * @port_num: port number to send query on | 589 | * @port_num: port number to send query on |
562 | * @rec:Path Record to send in query | 590 | * @rec:Path Record to send in query |
@@ -579,7 +607,8 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) | |||
579 | * error code. Otherwise it is a query ID that can be used to cancel | 607 | * error code. Otherwise it is a query ID that can be used to cancel |
580 | * the query. | 608 | * the query. |
581 | */ | 609 | */ |
582 | int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, | 610 | int ib_sa_path_rec_get(struct ib_sa_client *client, |
611 | struct ib_device *device, u8 port_num, | ||
583 | struct ib_sa_path_rec *rec, | 612 | struct ib_sa_path_rec *rec, |
584 | ib_sa_comp_mask comp_mask, | 613 | ib_sa_comp_mask comp_mask, |
585 | int timeout_ms, gfp_t gfp_mask, | 614 | int timeout_ms, gfp_t gfp_mask, |
@@ -614,8 +643,10 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, | |||
614 | goto err1; | 643 | goto err1; |
615 | } | 644 | } |
616 | 645 | ||
617 | query->callback = callback; | 646 | ib_sa_client_get(client); |
618 | query->context = context; | 647 | query->sa_query.client = client; |
648 | query->callback = callback; | ||
649 | query->context = context; | ||
619 | 650 | ||
620 | mad = query->sa_query.mad_buf->mad; | 651 | mad = query->sa_query.mad_buf->mad; |
621 | init_mad(mad, agent); | 652 | init_mad(mad, agent); |
@@ -639,6 +670,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, | |||
639 | 670 | ||
640 | err2: | 671 | err2: |
641 | *sa_query = NULL; | 672 | *sa_query = NULL; |
673 | ib_sa_client_put(query->sa_query.client); | ||
642 | ib_free_send_mad(query->sa_query.mad_buf); | 674 | ib_free_send_mad(query->sa_query.mad_buf); |
643 | 675 | ||
644 | err1: | 676 | err1: |
@@ -671,6 +703,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) | |||
671 | 703 | ||
672 | /** | 704 | /** |
673 | * ib_sa_service_rec_query - Start Service Record operation | 705 | * ib_sa_service_rec_query - Start Service Record operation |
706 | * @client:SA client | ||
674 | * @device:device to send request on | 707 | * @device:device to send request on |
675 | * @port_num: port number to send request on | 708 | * @port_num: port number to send request on |
676 | * @method:SA method - should be get, set, or delete | 709 | * @method:SA method - should be get, set, or delete |
@@ -695,7 +728,8 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) | |||
695 | * error code. Otherwise it is a request ID that can be used to cancel | 728 | * error code. Otherwise it is a request ID that can be used to cancel |
696 | * the query. | 729 | * the query. |
697 | */ | 730 | */ |
698 | int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, | 731 | int ib_sa_service_rec_query(struct ib_sa_client *client, |
732 | struct ib_device *device, u8 port_num, u8 method, | ||
699 | struct ib_sa_service_rec *rec, | 733 | struct ib_sa_service_rec *rec, |
700 | ib_sa_comp_mask comp_mask, | 734 | ib_sa_comp_mask comp_mask, |
701 | int timeout_ms, gfp_t gfp_mask, | 735 | int timeout_ms, gfp_t gfp_mask, |
@@ -735,8 +769,10 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, | |||
735 | goto err1; | 769 | goto err1; |
736 | } | 770 | } |
737 | 771 | ||
738 | query->callback = callback; | 772 | ib_sa_client_get(client); |
739 | query->context = context; | 773 | query->sa_query.client = client; |
774 | query->callback = callback; | ||
775 | query->context = context; | ||
740 | 776 | ||
741 | mad = query->sa_query.mad_buf->mad; | 777 | mad = query->sa_query.mad_buf->mad; |
742 | init_mad(mad, agent); | 778 | init_mad(mad, agent); |
@@ -761,6 +797,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, | |||
761 | 797 | ||
762 | err2: | 798 | err2: |
763 | *sa_query = NULL; | 799 | *sa_query = NULL; |
800 | ib_sa_client_put(query->sa_query.client); | ||
764 | ib_free_send_mad(query->sa_query.mad_buf); | 801 | ib_free_send_mad(query->sa_query.mad_buf); |
765 | 802 | ||
766 | err1: | 803 | err1: |
@@ -791,7 +828,8 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) | |||
791 | kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); | 828 | kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); |
792 | } | 829 | } |
793 | 830 | ||
794 | int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, | 831 | int ib_sa_mcmember_rec_query(struct ib_sa_client *client, |
832 | struct ib_device *device, u8 port_num, | ||
795 | u8 method, | 833 | u8 method, |
796 | struct ib_sa_mcmember_rec *rec, | 834 | struct ib_sa_mcmember_rec *rec, |
797 | ib_sa_comp_mask comp_mask, | 835 | ib_sa_comp_mask comp_mask, |
@@ -827,8 +865,10 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, | |||
827 | goto err1; | 865 | goto err1; |
828 | } | 866 | } |
829 | 867 | ||
830 | query->callback = callback; | 868 | ib_sa_client_get(client); |
831 | query->context = context; | 869 | query->sa_query.client = client; |
870 | query->callback = callback; | ||
871 | query->context = context; | ||
832 | 872 | ||
833 | mad = query->sa_query.mad_buf->mad; | 873 | mad = query->sa_query.mad_buf->mad; |
834 | init_mad(mad, agent); | 874 | init_mad(mad, agent); |
@@ -853,6 +893,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, | |||
853 | 893 | ||
854 | err2: | 894 | err2: |
855 | *sa_query = NULL; | 895 | *sa_query = NULL; |
896 | ib_sa_client_put(query->sa_query.client); | ||
856 | ib_free_send_mad(query->sa_query.mad_buf); | 897 | ib_free_send_mad(query->sa_query.mad_buf); |
857 | 898 | ||
858 | err1: | 899 | err1: |
@@ -887,8 +928,9 @@ static void send_handler(struct ib_mad_agent *agent, | |||
887 | idr_remove(&query_idr, query->id); | 928 | idr_remove(&query_idr, query->id); |
888 | spin_unlock_irqrestore(&idr_lock, flags); | 929 | spin_unlock_irqrestore(&idr_lock, flags); |
889 | 930 | ||
890 | ib_free_send_mad(mad_send_wc->send_buf); | 931 | ib_free_send_mad(mad_send_wc->send_buf); |
891 | kref_put(&query->sm_ah->ref, free_sm_ah); | 932 | kref_put(&query->sm_ah->ref, free_sm_ah); |
933 | ib_sa_client_put(query->client); | ||
892 | query->release(query); | 934 | query->release(query); |
893 | } | 935 | } |
894 | 936 | ||
@@ -919,7 +961,10 @@ static void ib_sa_add_one(struct ib_device *device) | |||
919 | struct ib_sa_device *sa_dev; | 961 | struct ib_sa_device *sa_dev; |
920 | int s, e, i; | 962 | int s, e, i; |
921 | 963 | ||
922 | if (device->node_type == IB_NODE_SWITCH) | 964 | if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) |
965 | return; | ||
966 | |||
967 | if (device->node_type == RDMA_NODE_IB_SWITCH) | ||
923 | s = e = 0; | 968 | s = e = 0; |
924 | else { | 969 | else { |
925 | s = 1; | 970 | s = 1; |
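Note on the sa_query.c hunks above: every SA query entry point now takes a struct ib_sa_client and pins it with ib_sa_client_get() before the MAD is posted; the reference is dropped in send_handler() when the query completes, or on the err2 path if posting fails, so the SA layer can track outstanding queries per client. A minimal consumer sketch follows, assuming the ib_sa_register_client()/ib_sa_unregister_client() helpers introduced elsewhere in this series; everything other than the ib_sa_* calls is a hypothetical name.

	/* Hedged sketch: resolving a path record with a registered SA client. */
	static struct ib_sa_client my_sa_client;	/* assumed registered at module init
							 * with ib_sa_register_client() */

	static int my_resolve_path(struct ib_device *dev, u8 port,
				   struct ib_sa_path_rec *rec,
				   void (*done)(int status, struct ib_sa_path_rec *resp,
						void *context),
				   void *context, struct ib_sa_query **query)
	{
		return ib_sa_path_rec_get(&my_sa_client, dev, port, rec,
					  IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
					  2000 /* timeout, ms */, GFP_KERNEL,
					  done, context, query);
	}

The same pattern applies to ib_sa_service_rec_query() and ib_sa_mcmember_rec_query(), which gain an identical leading client argument in the hunks above.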
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 35852e794e26..54b81e17ad50 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c | |||
@@ -64,7 +64,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, | |||
64 | 64 | ||
65 | /* C14-9:2 */ | 65 | /* C14-9:2 */ |
66 | if (hop_ptr && hop_ptr < hop_cnt) { | 66 | if (hop_ptr && hop_ptr < hop_cnt) { |
67 | if (node_type != IB_NODE_SWITCH) | 67 | if (node_type != RDMA_NODE_IB_SWITCH) |
68 | return 0; | 68 | return 0; |
69 | 69 | ||
70 | /* smp->return_path set when received */ | 70 | /* smp->return_path set when received */ |
@@ -77,7 +77,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, | |||
77 | if (hop_ptr == hop_cnt) { | 77 | if (hop_ptr == hop_cnt) { |
78 | /* smp->return_path set when received */ | 78 | /* smp->return_path set when received */ |
79 | smp->hop_ptr++; | 79 | smp->hop_ptr++; |
80 | return (node_type == IB_NODE_SWITCH || | 80 | return (node_type == RDMA_NODE_IB_SWITCH || |
81 | smp->dr_dlid == IB_LID_PERMISSIVE); | 81 | smp->dr_dlid == IB_LID_PERMISSIVE); |
82 | } | 82 | } |
83 | 83 | ||
@@ -95,7 +95,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, | |||
95 | 95 | ||
96 | /* C14-13:2 */ | 96 | /* C14-13:2 */ |
97 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) { | 97 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) { |
98 | if (node_type != IB_NODE_SWITCH) | 98 | if (node_type != RDMA_NODE_IB_SWITCH) |
99 | return 0; | 99 | return 0; |
100 | 100 | ||
101 | smp->hop_ptr--; | 101 | smp->hop_ptr--; |
@@ -107,7 +107,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, | |||
107 | if (hop_ptr == 1) { | 107 | if (hop_ptr == 1) { |
108 | smp->hop_ptr--; | 108 | smp->hop_ptr--; |
109 | /* C14-13:3 -- SMPs destined for SM shouldn't be here */ | 109 | /* C14-13:3 -- SMPs destined for SM shouldn't be here */ |
110 | return (node_type == IB_NODE_SWITCH || | 110 | return (node_type == RDMA_NODE_IB_SWITCH || |
111 | smp->dr_slid == IB_LID_PERMISSIVE); | 111 | smp->dr_slid == IB_LID_PERMISSIVE); |
112 | } | 112 | } |
113 | 113 | ||
@@ -142,7 +142,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
142 | 142 | ||
143 | /* C14-9:2 -- intermediate hop */ | 143 | /* C14-9:2 -- intermediate hop */ |
144 | if (hop_ptr && hop_ptr < hop_cnt) { | 144 | if (hop_ptr && hop_ptr < hop_cnt) { |
145 | if (node_type != IB_NODE_SWITCH) | 145 | if (node_type != RDMA_NODE_IB_SWITCH) |
146 | return 0; | 146 | return 0; |
147 | 147 | ||
148 | smp->return_path[hop_ptr] = port_num; | 148 | smp->return_path[hop_ptr] = port_num; |
@@ -156,7 +156,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
156 | smp->return_path[hop_ptr] = port_num; | 156 | smp->return_path[hop_ptr] = port_num; |
157 | /* smp->hop_ptr updated when sending */ | 157 | /* smp->hop_ptr updated when sending */ |
158 | 158 | ||
159 | return (node_type == IB_NODE_SWITCH || | 159 | return (node_type == RDMA_NODE_IB_SWITCH || |
160 | smp->dr_dlid == IB_LID_PERMISSIVE); | 160 | smp->dr_dlid == IB_LID_PERMISSIVE); |
161 | } | 161 | } |
162 | 162 | ||
@@ -175,7 +175,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
175 | 175 | ||
176 | /* C14-13:2 */ | 176 | /* C14-13:2 */ |
177 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) { | 177 | if (2 <= hop_ptr && hop_ptr <= hop_cnt) { |
178 | if (node_type != IB_NODE_SWITCH) | 178 | if (node_type != RDMA_NODE_IB_SWITCH) |
179 | return 0; | 179 | return 0; |
180 | 180 | ||
181 | /* smp->hop_ptr updated when sending */ | 181 | /* smp->hop_ptr updated when sending */ |
@@ -190,7 +190,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, | |||
190 | return 1; | 190 | return 1; |
191 | } | 191 | } |
192 | /* smp->hop_ptr updated when sending */ | 192 | /* smp->hop_ptr updated when sending */ |
193 | return (node_type == IB_NODE_SWITCH); | 193 | return (node_type == RDMA_NODE_IB_SWITCH); |
194 | } | 194 | } |
195 | 195 | ||
196 | /* C14-13:4 -- hop_ptr = 0 -> give to SM */ | 196 | /* C14-13:4 -- hop_ptr = 0 -> give to SM */ |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 21f9282c1b25..709323c14c5d 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
@@ -68,7 +68,7 @@ struct port_table_attribute { | |||
68 | int index; | 68 | int index; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | static inline int ibdev_is_alive(const struct ib_device *dev) | 71 | static inline int ibdev_is_alive(const struct ib_device *dev) |
72 | { | 72 | { |
73 | return dev->reg_state == IB_DEV_REGISTERED; | 73 | return dev->reg_state == IB_DEV_REGISTERED; |
74 | } | 74 | } |
@@ -589,10 +589,11 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf) | |||
589 | return -ENODEV; | 589 | return -ENODEV; |
590 | 590 | ||
591 | switch (dev->node_type) { | 591 | switch (dev->node_type) { |
592 | case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type); | 592 | case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type); |
593 | case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); | 593 | case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type); |
594 | case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); | 594 | case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); |
595 | default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); | 595 | case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); |
596 | default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); | ||
596 | } | 597 | } |
597 | } | 598 | } |
598 | 599 | ||
@@ -708,7 +709,7 @@ int ib_device_register_sysfs(struct ib_device *device) | |||
708 | if (ret) | 709 | if (ret) |
709 | goto err_put; | 710 | goto err_put; |
710 | 711 | ||
711 | if (device->node_type == IB_NODE_SWITCH) { | 712 | if (device->node_type == RDMA_NODE_IB_SWITCH) { |
712 | ret = add_port(device, 0); | 713 | ret = add_port(device, 0); |
713 | if (ret) | 714 | if (ret) |
714 | goto err_put; | 715 | goto err_put; |
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index c1c6fda9452c..ad4f4d5c2924 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, | |||
309 | info = evt->param.apr_rcvd.apr_info; | 309 | info = evt->param.apr_rcvd.apr_info; |
310 | break; | 310 | break; |
311 | case IB_CM_SIDR_REQ_RECEIVED: | 311 | case IB_CM_SIDR_REQ_RECEIVED: |
312 | uvt->resp.u.sidr_req_resp.pkey = | 312 | uvt->resp.u.sidr_req_resp.pkey = |
313 | evt->param.sidr_req_rcvd.pkey; | 313 | evt->param.sidr_req_rcvd.pkey; |
314 | uvt->resp.u.sidr_req_resp.port = | 314 | uvt->resp.u.sidr_req_resp.port = |
315 | evt->param.sidr_req_rcvd.port; | 315 | evt->param.sidr_req_rcvd.port; |
316 | uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; | 316 | uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; |
317 | break; | 317 | break; |
@@ -1237,7 +1237,7 @@ static struct class ucm_class = { | |||
1237 | static ssize_t show_ibdev(struct class_device *class_dev, char *buf) | 1237 | static ssize_t show_ibdev(struct class_device *class_dev, char *buf) |
1238 | { | 1238 | { |
1239 | struct ib_ucm_device *dev; | 1239 | struct ib_ucm_device *dev; |
1240 | 1240 | ||
1241 | dev = container_of(class_dev, struct ib_ucm_device, class_dev); | 1241 | dev = container_of(class_dev, struct ib_ucm_device, class_dev); |
1242 | return sprintf(buf, "%s\n", dev->ib_dev->name); | 1242 | return sprintf(buf, "%s\n", dev->ib_dev->name); |
1243 | } | 1243 | } |
@@ -1247,7 +1247,8 @@ static void ib_ucm_add_one(struct ib_device *device) | |||
1247 | { | 1247 | { |
1248 | struct ib_ucm_device *ucm_dev; | 1248 | struct ib_ucm_device *ucm_dev; |
1249 | 1249 | ||
1250 | if (!device->alloc_ucontext) | 1250 | if (!device->alloc_ucontext || |
1251 | rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) | ||
1251 | return; | 1252 | return; |
1252 | 1253 | ||
1253 | ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); | 1254 | ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 1273f8807e84..807fbd6b8414 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. | 3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. |
4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | 4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
@@ -1032,7 +1032,10 @@ static void ib_umad_add_one(struct ib_device *device) | |||
1032 | struct ib_umad_device *umad_dev; | 1032 | struct ib_umad_device *umad_dev; |
1033 | int s, e, i; | 1033 | int s, e, i; |
1034 | 1034 | ||
1035 | if (device->node_type == IB_NODE_SWITCH) | 1035 | if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) |
1036 | return; | ||
1037 | |||
1038 | if (device->node_type == RDMA_NODE_IB_SWITCH) | ||
1036 | s = e = 0; | 1039 | s = e = 0; |
1037 | else { | 1040 | else { |
1038 | s = 1; | 1041 | s = 1; |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 30923eb68ec7..b72c7f69ca90 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -155,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, | |||
155 | } | 155 | } |
156 | 156 | ||
157 | static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, | 157 | static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, |
158 | struct ib_ucontext *context) | 158 | struct ib_ucontext *context, int nested) |
159 | { | 159 | { |
160 | struct ib_uobject *uobj; | 160 | struct ib_uobject *uobj; |
161 | 161 | ||
@@ -163,7 +163,10 @@ static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, | |||
163 | if (!uobj) | 163 | if (!uobj) |
164 | return NULL; | 164 | return NULL; |
165 | 165 | ||
166 | down_read(&uobj->mutex); | 166 | if (nested) |
167 | down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING); | ||
168 | else | ||
169 | down_read(&uobj->mutex); | ||
167 | if (!uobj->live) { | 170 | if (!uobj->live) { |
168 | put_uobj_read(uobj); | 171 | put_uobj_read(uobj); |
169 | return NULL; | 172 | return NULL; |
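The nested parameter added to idr_read_uobj() above exists for one caller: ib_uverbs_create_qp() may read-lock two distinct CQ uobjects (send and receive), and because both belong to the same lock class, lockdep would report the second down_read() as a recursive acquisition. Annotating it with SINGLE_DEPTH_NESTING marks the nesting as intentional. The generic pattern, as an illustrative sketch rather than code from this patch:

	#include <linux/rwsem.h>
	#include <linux/lockdep.h>

	static void read_lock_pair(struct rw_semaphore *a, struct rw_semaphore *b)
	{
		down_read(a);
		/* second lock of the same class: annotate the nesting level */
		down_read_nested(b, SINGLE_DEPTH_NESTING);

		/* ... inspect both objects ... */

		up_read(b);
		up_read(a);
	}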
@@ -190,17 +193,18 @@ static struct ib_uobject *idr_write_uobj(struct idr *idr, int id, | |||
190 | return uobj; | 193 | return uobj; |
191 | } | 194 | } |
192 | 195 | ||
193 | static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context) | 196 | static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context, |
197 | int nested) | ||
194 | { | 198 | { |
195 | struct ib_uobject *uobj; | 199 | struct ib_uobject *uobj; |
196 | 200 | ||
197 | uobj = idr_read_uobj(idr, id, context); | 201 | uobj = idr_read_uobj(idr, id, context, nested); |
198 | return uobj ? uobj->object : NULL; | 202 | return uobj ? uobj->object : NULL; |
199 | } | 203 | } |
200 | 204 | ||
201 | static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) | 205 | static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) |
202 | { | 206 | { |
203 | return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context); | 207 | return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0); |
204 | } | 208 | } |
205 | 209 | ||
206 | static void put_pd_read(struct ib_pd *pd) | 210 | static void put_pd_read(struct ib_pd *pd) |
@@ -208,9 +212,9 @@ static void put_pd_read(struct ib_pd *pd) | |||
208 | put_uobj_read(pd->uobject); | 212 | put_uobj_read(pd->uobject); |
209 | } | 213 | } |
210 | 214 | ||
211 | static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context) | 215 | static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested) |
212 | { | 216 | { |
213 | return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context); | 217 | return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested); |
214 | } | 218 | } |
215 | 219 | ||
216 | static void put_cq_read(struct ib_cq *cq) | 220 | static void put_cq_read(struct ib_cq *cq) |
@@ -220,7 +224,7 @@ static void put_cq_read(struct ib_cq *cq) | |||
220 | 224 | ||
221 | static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) | 225 | static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) |
222 | { | 226 | { |
223 | return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context); | 227 | return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0); |
224 | } | 228 | } |
225 | 229 | ||
226 | static void put_ah_read(struct ib_ah *ah) | 230 | static void put_ah_read(struct ib_ah *ah) |
@@ -230,7 +234,7 @@ static void put_ah_read(struct ib_ah *ah) | |||
230 | 234 | ||
231 | static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) | 235 | static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) |
232 | { | 236 | { |
233 | return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context); | 237 | return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); |
234 | } | 238 | } |
235 | 239 | ||
236 | static void put_qp_read(struct ib_qp *qp) | 240 | static void put_qp_read(struct ib_qp *qp) |
@@ -240,7 +244,7 @@ static void put_qp_read(struct ib_qp *qp) | |||
240 | 244 | ||
241 | static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) | 245 | static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) |
242 | { | 246 | { |
243 | return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context); | 247 | return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); |
244 | } | 248 | } |
245 | 249 | ||
246 | static void put_srq_read(struct ib_srq *srq) | 250 | static void put_srq_read(struct ib_srq *srq) |
@@ -837,7 +841,6 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, | |||
837 | err_copy: | 841 | err_copy: |
838 | idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); | 842 | idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); |
839 | 843 | ||
840 | |||
841 | err_free: | 844 | err_free: |
842 | ib_destroy_cq(cq); | 845 | ib_destroy_cq(cq); |
843 | 846 | ||
@@ -867,7 +870,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, | |||
867 | (unsigned long) cmd.response + sizeof resp, | 870 | (unsigned long) cmd.response + sizeof resp, |
868 | in_len - sizeof cmd, out_len - sizeof resp); | 871 | in_len - sizeof cmd, out_len - sizeof resp); |
869 | 872 | ||
870 | cq = idr_read_cq(cmd.cq_handle, file->ucontext); | 873 | cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); |
871 | if (!cq) | 874 | if (!cq) |
872 | return -EINVAL; | 875 | return -EINVAL; |
873 | 876 | ||
@@ -875,11 +878,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, | |||
875 | if (ret) | 878 | if (ret) |
876 | goto out; | 879 | goto out; |
877 | 880 | ||
878 | memset(&resp, 0, sizeof resp); | ||
879 | resp.cqe = cq->cqe; | 881 | resp.cqe = cq->cqe; |
880 | 882 | ||
881 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 883 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
882 | &resp, sizeof resp)) | 884 | &resp, sizeof resp.cqe)) |
883 | ret = -EFAULT; | 885 | ret = -EFAULT; |
884 | 886 | ||
885 | out: | 887 | out: |
@@ -894,7 +896,6 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, | |||
894 | { | 896 | { |
895 | struct ib_uverbs_poll_cq cmd; | 897 | struct ib_uverbs_poll_cq cmd; |
896 | struct ib_uverbs_poll_cq_resp *resp; | 898 | struct ib_uverbs_poll_cq_resp *resp; |
897 | struct ib_uobject *uobj; | ||
898 | struct ib_cq *cq; | 899 | struct ib_cq *cq; |
899 | struct ib_wc *wc; | 900 | struct ib_wc *wc; |
900 | int ret = 0; | 901 | int ret = 0; |
@@ -915,16 +916,15 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, | |||
915 | goto out_wc; | 916 | goto out_wc; |
916 | } | 917 | } |
917 | 918 | ||
918 | uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); | 919 | cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); |
919 | if (!uobj) { | 920 | if (!cq) { |
920 | ret = -EINVAL; | 921 | ret = -EINVAL; |
921 | goto out; | 922 | goto out; |
922 | } | 923 | } |
923 | cq = uobj->object; | ||
924 | 924 | ||
925 | resp->count = ib_poll_cq(cq, cmd.ne, wc); | 925 | resp->count = ib_poll_cq(cq, cmd.ne, wc); |
926 | 926 | ||
927 | put_uobj_read(uobj); | 927 | put_cq_read(cq); |
928 | 928 | ||
929 | for (i = 0; i < resp->count; i++) { | 929 | for (i = 0; i < resp->count; i++) { |
930 | resp->wc[i].wr_id = wc[i].wr_id; | 930 | resp->wc[i].wr_id = wc[i].wr_id; |
@@ -959,21 +959,19 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, | |||
959 | int out_len) | 959 | int out_len) |
960 | { | 960 | { |
961 | struct ib_uverbs_req_notify_cq cmd; | 961 | struct ib_uverbs_req_notify_cq cmd; |
962 | struct ib_uobject *uobj; | ||
963 | struct ib_cq *cq; | 962 | struct ib_cq *cq; |
964 | 963 | ||
965 | if (copy_from_user(&cmd, buf, sizeof cmd)) | 964 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
966 | return -EFAULT; | 965 | return -EFAULT; |
967 | 966 | ||
968 | uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); | 967 | cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); |
969 | if (!uobj) | 968 | if (!cq) |
970 | return -EINVAL; | 969 | return -EINVAL; |
971 | cq = uobj->object; | ||
972 | 970 | ||
973 | ib_req_notify_cq(cq, cmd.solicited_only ? | 971 | ib_req_notify_cq(cq, cmd.solicited_only ? |
974 | IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); | 972 | IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); |
975 | 973 | ||
976 | put_uobj_read(uobj); | 974 | put_cq_read(cq); |
977 | 975 | ||
978 | return in_len; | 976 | return in_len; |
979 | } | 977 | } |
@@ -1064,9 +1062,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1064 | 1062 | ||
1065 | srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; | 1063 | srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; |
1066 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 1064 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
1067 | scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); | 1065 | scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0); |
1068 | rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? | 1066 | rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? |
1069 | scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext); | 1067 | scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1); |
1070 | 1068 | ||
1071 | if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { | 1069 | if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { |
1072 | ret = -EINVAL; | 1070 | ret = -EINVAL; |
@@ -1274,6 +1272,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | |||
1274 | int out_len) | 1272 | int out_len) |
1275 | { | 1273 | { |
1276 | struct ib_uverbs_modify_qp cmd; | 1274 | struct ib_uverbs_modify_qp cmd; |
1275 | struct ib_udata udata; | ||
1277 | struct ib_qp *qp; | 1276 | struct ib_qp *qp; |
1278 | struct ib_qp_attr *attr; | 1277 | struct ib_qp_attr *attr; |
1279 | int ret; | 1278 | int ret; |
@@ -1281,6 +1280,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | |||
1281 | if (copy_from_user(&cmd, buf, sizeof cmd)) | 1280 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
1282 | return -EFAULT; | 1281 | return -EFAULT; |
1283 | 1282 | ||
1283 | INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, | ||
1284 | out_len); | ||
1285 | |||
1284 | attr = kmalloc(sizeof *attr, GFP_KERNEL); | 1286 | attr = kmalloc(sizeof *attr, GFP_KERNEL); |
1285 | if (!attr) | 1287 | if (!attr) |
1286 | return -ENOMEM; | 1288 | return -ENOMEM; |
@@ -1337,7 +1339,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | |||
1337 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; | 1339 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; |
1338 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; | 1340 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; |
1339 | 1341 | ||
1340 | ret = ib_modify_qp(qp, attr, cmd.attr_mask); | 1342 | ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); |
1341 | 1343 | ||
1342 | put_qp_read(qp); | 1344 | put_qp_read(qp); |
1343 | 1345 | ||
@@ -1674,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, | |||
1674 | break; | 1676 | break; |
1675 | } | 1677 | } |
1676 | 1678 | ||
1677 | |||
1678 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 1679 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
1679 | &resp, sizeof resp)) | 1680 | &resp, sizeof resp)) |
1680 | ret = -EFAULT; | 1681 | ret = -EFAULT; |
@@ -1724,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, | |||
1724 | break; | 1725 | break; |
1725 | } | 1726 | } |
1726 | 1727 | ||
1727 | |||
1728 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 1728 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
1729 | &resp, sizeof resp)) | 1729 | &resp, sizeof resp)) |
1730 | ret = -EFAULT; | 1730 | ret = -EFAULT; |
@@ -2055,6 +2055,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | |||
2055 | int out_len) | 2055 | int out_len) |
2056 | { | 2056 | { |
2057 | struct ib_uverbs_modify_srq cmd; | 2057 | struct ib_uverbs_modify_srq cmd; |
2058 | struct ib_udata udata; | ||
2058 | struct ib_srq *srq; | 2059 | struct ib_srq *srq; |
2059 | struct ib_srq_attr attr; | 2060 | struct ib_srq_attr attr; |
2060 | int ret; | 2061 | int ret; |
@@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | |||
2062 | if (copy_from_user(&cmd, buf, sizeof cmd)) | 2063 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
2063 | return -EFAULT; | 2064 | return -EFAULT; |
2064 | 2065 | ||
2066 | INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, | ||
2067 | out_len); | ||
2068 | |||
2065 | srq = idr_read_srq(cmd.srq_handle, file->ucontext); | 2069 | srq = idr_read_srq(cmd.srq_handle, file->ucontext); |
2066 | if (!srq) | 2070 | if (!srq) |
2067 | return -EINVAL; | 2071 | return -EINVAL; |
@@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | |||
2069 | attr.max_wr = cmd.max_wr; | 2073 | attr.max_wr = cmd.max_wr; |
2070 | attr.srq_limit = cmd.srq_limit; | 2074 | attr.srq_limit = cmd.srq_limit; |
2071 | 2075 | ||
2072 | ret = ib_modify_srq(srq, &attr, cmd.attr_mask); | 2076 | ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); |
2073 | 2077 | ||
2074 | put_srq_read(srq); | 2078 | put_srq_read(srq); |
2075 | 2079 | ||
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 468999c38803..8b5dd3649bbf 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -79,6 +79,23 @@ enum ib_rate mult_to_ib_rate(int mult) | |||
79 | } | 79 | } |
80 | EXPORT_SYMBOL(mult_to_ib_rate); | 80 | EXPORT_SYMBOL(mult_to_ib_rate); |
81 | 81 | ||
82 | enum rdma_transport_type | ||
83 | rdma_node_get_transport(enum rdma_node_type node_type) | ||
84 | { | ||
85 | switch (node_type) { | ||
86 | case RDMA_NODE_IB_CA: | ||
87 | case RDMA_NODE_IB_SWITCH: | ||
88 | case RDMA_NODE_IB_ROUTER: | ||
89 | return RDMA_TRANSPORT_IB; | ||
90 | case RDMA_NODE_RNIC: | ||
91 | return RDMA_TRANSPORT_IWARP; | ||
92 | default: | ||
93 | BUG(); | ||
94 | return 0; | ||
95 | } | ||
96 | } | ||
97 | EXPORT_SYMBOL(rdma_node_get_transport); | ||
98 | |||
82 | /* Protection domains */ | 99 | /* Protection domains */ |
83 | 100 | ||
84 | struct ib_pd *ib_alloc_pd(struct ib_device *device) | 101 | struct ib_pd *ib_alloc_pd(struct ib_device *device) |
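rdma_node_get_transport(), added in the hunk above, is what lets IB-only consumers in this patch (ib_sa, ib_ucm, ib_umad) skip iWARP devices, which register as RDMA_NODE_RNIC. The gating pattern they share, as a sketch with a hypothetical client name:

	static void example_add_one(struct ib_device *device)
	{
		/* this client speaks IB management protocols only */
		if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
			return;

		/* ... IB-specific per-device setup ... */
	}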
@@ -231,7 +248,7 @@ int ib_modify_srq(struct ib_srq *srq, | |||
231 | struct ib_srq_attr *srq_attr, | 248 | struct ib_srq_attr *srq_attr, |
232 | enum ib_srq_attr_mask srq_attr_mask) | 249 | enum ib_srq_attr_mask srq_attr_mask) |
233 | { | 250 | { |
234 | return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); | 251 | return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL); |
235 | } | 252 | } |
236 | EXPORT_SYMBOL(ib_modify_srq); | 253 | EXPORT_SYMBOL(ib_modify_srq); |
237 | 254 | ||
@@ -547,7 +564,7 @@ int ib_modify_qp(struct ib_qp *qp, | |||
547 | struct ib_qp_attr *qp_attr, | 564 | struct ib_qp_attr *qp_attr, |
548 | int qp_attr_mask) | 565 | int qp_attr_mask) |
549 | { | 566 | { |
550 | return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); | 567 | return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); |
551 | } | 568 | } |
552 | EXPORT_SYMBOL(ib_modify_qp); | 569 | EXPORT_SYMBOL(ib_modify_qp); |
553 | 570 | ||
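With ib_modify_qp()/ib_modify_srq() passing a NULL udata while ib_uverbs_modify_qp()/ib_uverbs_modify_srq() build one from the user buffer, a provider's modify hooks can now tell kernel callers from userspace callers and consume provider-private command data. A driver-side sketch under that assumption; the command struct is hypothetical:

	static int example_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
				     int attr_mask, struct ib_udata *udata)
	{
		struct example_modify_qp_cmd cmd;	/* hypothetical private ABI */

		if (udata) {
			if (ib_copy_from_udata(&cmd, udata, sizeof cmd))
				return -EFAULT;
			/* apply userspace-supplied, provider-specific hints */
		}

		/* program the hardware QP from attr/attr_mask */
		return 0;
	}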