Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/addr.c        | 30
-rw-r--r--  drivers/infiniband/core/cache.c       |  3
-rw-r--r--  drivers/infiniband/core/cm.c          | 25
-rw-r--r--  drivers/infiniband/core/cma.c         | 22
-rw-r--r--  drivers/infiniband/core/fmr_pool.c    |  8
-rw-r--r--  drivers/infiniband/core/mad.c         | 22
-rw-r--r--  drivers/infiniband/core/sa_query.c    | 13
-rw-r--r--  drivers/infiniband/core/user_mad.c    | 87
-rw-r--r--  drivers/infiniband/core/uverbs.h      |  2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  | 42
-rw-r--r--  drivers/infiniband/core/uverbs_main.c |  8

11 files changed, 186 insertions(+), 76 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index d294bbc42f09..1205e8027829 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -35,6 +35,7 @@
 #include <net/arp.h>
 #include <net/neighbour.h>
 #include <net/route.h>
+#include <net/netevent.h>
 #include <rdma/ib_addr.h>
 
 MODULE_AUTHOR("Sean Hefty");
@@ -326,25 +327,22 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
-static int addr_arp_recv(struct sk_buff *skb, struct net_device *dev,
-			 struct packet_type *pkt, struct net_device *orig_dev)
+static int netevent_callback(struct notifier_block *self, unsigned long event,
+	void *ctx)
 {
-	struct arphdr *arp_hdr;
+	if (event == NETEVENT_NEIGH_UPDATE) {
+		struct neighbour *neigh = ctx;
 
-	arp_hdr = (struct arphdr *) skb->nh.raw;
-
-	if (arp_hdr->ar_op == htons(ARPOP_REQUEST) ||
-	    arp_hdr->ar_op == htons(ARPOP_REPLY))
-		set_timeout(jiffies);
-
-	kfree_skb(skb);
+		if (neigh->dev->type == ARPHRD_INFINIBAND &&
+		    (neigh->nud_state & NUD_VALID)) {
+			set_timeout(jiffies);
+		}
+	}
 	return 0;
 }
 
-static struct packet_type addr_arp = {
-	.type = __constant_htons(ETH_P_ARP),
-	.func = addr_arp_recv,
-	.af_packet_priv = (void*) 1,
+static struct notifier_block nb = {
+	.notifier_call = netevent_callback
 };
 
 static int addr_init(void)
@@ -353,13 +351,13 @@ static int addr_init(void)
 	if (!addr_wq)
 		return -ENOMEM;
 
-	dev_add_pack(&addr_arp);
+	register_netevent_notifier(&nb);
 	return 0;
 }
 
 static void addr_cleanup(void)
 {
-	dev_remove_pack(&addr_arp);
+	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
 }
 
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index e05ca2cdc73f..75313ade2e0d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
 	    event->event == IB_EVENT_PORT_ACTIVE ||
 	    event->event == IB_EVENT_LID_CHANGE  ||
 	    event->event == IB_EVENT_PKEY_CHANGE ||
-	    event->event == IB_EVENT_SM_CHANGE) {
+	    event->event == IB_EVENT_SM_CHANGE   ||
+	    event->event == IB_EVENT_CLIENT_REREGISTER) {
 		work = kmalloc(sizeof *work, GFP_ATOMIC);
 		if (work) {
 			INIT_WORK(&work->work, ib_cache_task, work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 3f6705f3083a..0de335b7bfc2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -701,7 +701,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
 	}
 }
 
-void ib_destroy_cm_id(struct ib_cm_id *cm_id)
+static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -735,12 +735,22 @@ retest:
 			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
 			       NULL, 0);
 		break;
+	case IB_CM_REQ_RCVD:
+		if (err == -ENOMEM) {
+			/* Do not reject to allow future retries. */
+			cm_reset_to_idle(cm_id_priv);
+			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		} else {
+			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+				       NULL, 0, NULL, 0);
+		}
+		break;
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		/* Fall through */
-	case IB_CM_REQ_RCVD:
 	case IB_CM_MRA_REQ_SENT:
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
@@ -775,6 +785,11 @@ retest:
 	kfree(cm_id_priv->private_data);
 	kfree(cm_id_priv);
 }
+
+void ib_destroy_cm_id(struct ib_cm_id *cm_id)
+{
+	cm_destroy_id(cm_id, 0);
+}
 EXPORT_SYMBOL(ib_destroy_cm_id);
 
 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
@@ -960,8 +975,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 
 	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
 								id.local_id);
-	if (IS_ERR(cm_id_priv->timewait_info))
+	if (IS_ERR(cm_id_priv->timewait_info)) {
+		ret = PTR_ERR(cm_id_priv->timewait_info);
 		goto out;
+	}
 
 	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
 	if (ret)
@@ -1163,7 +1180,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
 	}
 	cm_deref_id(cm_id_priv);
 	if (ret)
-		ib_destroy_cm_id(&cm_id_priv->id);
+		cm_destroy_id(&cm_id_priv->id, ret);
 }
 
 static void cm_format_mra(struct cm_mra_msg *mra_msg,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 863f64befc7c..d6f99d5720fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -262,14 +262,14 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 static int cma_acquire_ib_dev(struct rdma_id_private *id_priv)
 {
 	struct cma_device *cma_dev;
-	union ib_gid *gid;
+	union ib_gid gid;
 	int ret = -ENODEV;
 
-	gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr);
+	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid),
 
 	mutex_lock(&lock);
 	list_for_each_entry(cma_dev, &dev_list, list) {
-		ret = ib_find_cached_gid(cma_dev->device, gid,
+		ret = ib_find_cached_gid(cma_dev->device, &gid,
 					 &id_priv->id.port_num, NULL);
 		if (!ret) {
 			cma_attach_to_dev(id_priv, cma_dev);
@@ -812,6 +812,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		cma_modify_qp_err(&id_priv->id);
 		status = ib_event->param.rej_rcvd.reason;
 		event = RDMA_CM_EVENT_REJECTED;
+		private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
 		break;
 	default:
 		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -1134,8 +1135,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	struct ib_sa_path_rec path_rec;
 
 	memset(&path_rec, 0, sizeof path_rec);
-	path_rec.sgid = *ib_addr_get_sgid(addr);
-	path_rec.dgid = *ib_addr_get_dgid(addr);
+	ib_addr_get_sgid(addr, &path_rec.sgid);
+	ib_addr_get_dgid(addr, &path_rec.dgid);
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
 	path_rec.numb_path = 1;
 
@@ -1263,7 +1264,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 {
 	struct cma_device *cma_dev;
 	struct ib_port_attr port_attr;
-	union ib_gid *gid;
+	union ib_gid gid;
 	u16 pkey;
 	int ret;
 	u8 p;
@@ -1284,8 +1285,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	}
 
 port_found:
-	gid = ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr);
-	ret = ib_get_cached_gid(cma_dev->device, p, 0, gid);
+	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
 	if (ret)
 		goto out;
 
@@ -1293,6 +1293,7 @@ port_found:
 	if (ret)
 		goto out;
 
+	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
 	id_priv->id.port_num = p;
 	cma_attach_to_dev(id_priv, cma_dev);
@@ -1339,6 +1340,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 {
 	struct cma_work *work;
 	struct sockaddr_in *src_in, *dst_in;
+	union ib_gid gid;
 	int ret;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -1351,8 +1353,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 		goto err;
 	}
 
-	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
-			 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr));
+	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
 
 	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 615fe9cc6c56..86a3b2d401db 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -426,7 +426,7 @@ EXPORT_SYMBOL(ib_flush_fmr_pool);
 struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 					 u64 *page_list,
 					 int list_len,
-					 u64 *io_virtual_address)
+					 u64 io_virtual_address)
 {
 	struct ib_fmr_pool *pool = pool_handle;
 	struct ib_pool_fmr *fmr;
@@ -440,7 +440,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 	fmr = ib_fmr_cache_lookup(pool,
 				  page_list,
 				  list_len,
-				  *io_virtual_address);
+				  io_virtual_address);
 	if (fmr) {
 		/* found in cache */
 		++fmr->ref_count;
@@ -464,7 +464,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 	spin_unlock_irqrestore(&pool->pool_lock, flags);
 
 	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
-				 *io_virtual_address);
+				 io_virtual_address);
 
 	if (result) {
 		spin_lock_irqsave(&pool->pool_lock, flags);
@@ -481,7 +481,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 	fmr->ref_count = 1;
 
 	if (pool->cache_bucket) {
-		fmr->io_virtual_address = *io_virtual_address;
+		fmr->io_virtual_address = io_virtual_address;
 		fmr->page_list_len      = list_len;
 		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6f..1c3cfbbe6a97 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
 	return 0;
 }
 
+int ib_response_mad(struct ib_mad *mad)
+{
+	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
+		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
+		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+}
+EXPORT_SYMBOL(ib_response_mad);
+
 /*
  * ib_register_mad_agent - Register to send/receive MADs
  */
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);
 
-static inline int response_mad(struct ib_mad *mad)
-{
-	/* Trap represses are responses although response bit is reset */
-	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
 static void dequeue_mad(struct ib_mad_list_head *mad_list)
 {
 	struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-		if (response_mad(&mad_priv->mad.mad) &&
+		if (ib_response_mad(&mad_priv->mad.mad) &&
 		    mad_agent_priv->agent.recv_handler) {
 			local->mad_priv = mad_priv;
 			local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	unsigned long flags;
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
-	if (response_mad(mad)) {
+	if (ib_response_mad(mad)) {
 		u32 hi_tid;
 		struct ib_mad_agent_private *entry;
 
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}
 
 	/* Complete corresponding request */
-	if (response_mad(mad_recv_wc->recv_buf.mad)) {
+	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e911c99ff843..d6b84226bba7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 	    event->event == IB_EVENT_PORT_ACTIVE ||
 	    event->event == IB_EVENT_LID_CHANGE  ||
 	    event->event == IB_EVENT_PKEY_CHANGE ||
-	    event->event == IB_EVENT_SM_CHANGE) {
+	    event->event == IB_EVENT_SM_CHANGE   ||
+	    event->event == IB_EVENT_CLIENT_REREGISTER) {
 		struct ib_sa_device *sa_dev;
 		sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
 
@@ -488,13 +489,13 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 	spin_unlock_irqrestore(&tid_lock, flags);
 }
 
-static int send_mad(struct ib_sa_query *query, int timeout_ms)
+static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 {
 	unsigned long flags;
 	int ret, id;
 
 retry:
-	if (!idr_pre_get(&query_idr, GFP_ATOMIC))
+	if (!idr_pre_get(&query_idr, gfp_mask))
 		return -ENOMEM;
 	spin_lock_irqsave(&idr_lock, flags);
 	ret = idr_get_new(&query_idr, query, &id);
@@ -630,7 +631,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 
 	*sa_query = &query->sa_query;
 
-	ret = send_mad(&query->sa_query, timeout_ms);
+	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
 	if (ret < 0)
 		goto err2;
 
@@ -752,7 +753,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 
 	*sa_query = &query->sa_query;
 
-	ret = send_mad(&query->sa_query, timeout_ms);
+	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
 	if (ret < 0)
 		goto err2;
 
@@ -844,7 +845,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 
 	*sa_query = &query->sa_query;
 
-	ret = send_mad(&query->sa_query, timeout_ms);
+	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
 	if (ret < 0)
 		goto err2;
 
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2f..1273f8807e84 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
 struct ib_umad_file {
 	struct ib_umad_port *port;
 	struct list_head recv_list;
+	struct list_head send_list;
 	struct list_head port_list;
 	spinlock_t recv_lock;
+	spinlock_t send_lock;
 	wait_queue_head_t recv_wait;
 	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
 	int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }
 
+static void dequeue_send(struct ib_umad_file *file,
+			 struct ib_umad_packet *packet)
+{
+	spin_lock_irq(&file->send_lock);
+	list_del(&packet->list);
+	spin_unlock_irq(&file->send_lock);
+}
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
 	struct ib_umad_file *file = agent->context;
 	struct ib_umad_packet *packet = send_wc->send_buf->context[0];
 
+	dequeue_send(file, packet);
 	ib_destroy_ah(packet->msg->ah);
 	ib_free_send_mad(packet->msg);
 
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
 	return 0;
 }
 
+static int same_destination(struct ib_user_mad_hdr *hdr1,
+			    struct ib_user_mad_hdr *hdr2)
+{
+	if (!hdr1->grh_present && !hdr2->grh_present)
+		return (hdr1->lid == hdr2->lid);
+
+	if (hdr1->grh_present && hdr2->grh_present)
+		return !memcmp(hdr1->gid, hdr2->gid, 16);
+
+	return 0;
+}
+
+static int is_duplicate(struct ib_umad_file *file,
+			struct ib_umad_packet *packet)
+{
+	struct ib_umad_packet *sent_packet;
+	struct ib_mad_hdr *sent_hdr, *hdr;
+
+	hdr = (struct ib_mad_hdr *) packet->mad.data;
+	list_for_each_entry(sent_packet, &file->send_list, list) {
+		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
+
+		if ((hdr->tid != sent_hdr->tid) ||
+		    (hdr->mgmt_class != sent_hdr->mgmt_class))
+			continue;
+
+		/*
+		 * No need to be overly clever here.  If two new operations have
+		 * the same TID, reject the second as a duplicate.  This is more
+		 * restrictive than required by the spec.
+		 */
+		if (!ib_response_mad((struct ib_mad *) hdr)) {
+			if (!ib_response_mad((struct ib_mad *) sent_hdr))
+				return 1;
+			continue;
+		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+			continue;
+
+		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
+			return 1;
+	}
+
+	return 0;
+}
+
 static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	struct ib_ah_attr ah_attr;
 	struct ib_ah *ah;
 	struct ib_rmpp_mad *rmpp_mad;
-	u8 method;
 	__be64 *tid;
 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}
 
 	/*
-	 * If userspace is generating a request that will generate a
-	 * response, we need to make sure the high-order part of the
-	 * transaction ID matches the agent being used to send the
-	 * MAD.
+	 * Set the high-order part of the transaction ID to make MADs from
+	 * different agents unique, and allow routing responses back to the
+	 * original requestor.
 	 */
-	method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
-
-	if (!(method & IB_MGMT_METHOD_RESP)       &&
-	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
-	    method != IB_MGMT_METHOD_SEND) {
+	if (!ib_response_mad(packet->msg->mad)) {
 		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
 		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
 				   (be64_to_cpup(tid) & 0xffffffff));
+		rmpp_mad->mad_hdr.tid = *tid;
+	}
+
+	spin_lock_irq(&file->send_lock);
+	ret = is_duplicate(file, packet);
+	if (!ret)
+		list_add_tail(&packet->list, &file->send_list);
+	spin_unlock_irq(&file->send_lock);
+	if (ret) {
+		ret = -EINVAL;
+		goto err_msg;
 	}
 
 	ret = ib_post_send_mad(packet->msg, NULL);
 	if (ret)
-		goto err_msg;
+		goto err_send;
 
 	up_read(&file->port->mutex);
 	return count;
 
+err_send:
+	dequeue_send(file, packet);
 err_msg:
 	ib_free_send_mad(packet->msg);
 err_ah:
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 	}
 
 	spin_lock_init(&file->recv_lock);
+	spin_lock_init(&file->send_lock);
 	INIT_LIST_HEAD(&file->recv_list);
+	INIT_LIST_HEAD(&file->send_list);
 	init_waitqueue_head(&file->recv_wait);
 
 	file->port = port;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bb9bee56a824..102a59c033ff 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -42,6 +42,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
 #include <linux/mutex.h>
+#include <linux/completion.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -69,6 +70,7 @@
 
 struct ib_uverbs_device {
 	struct kref				ref;
+	struct completion			comp;
 	int					devnum;
 	struct cdev			       *dev;
 	struct class_device		       *class_dev;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bdf5d5098190..30923eb68ec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -42,6 +42,13 @@
 
 #include "uverbs.h"
 
+static struct lock_class_key pd_lock_key;
+static struct lock_class_key mr_lock_key;
+static struct lock_class_key cq_lock_key;
+static struct lock_class_key qp_lock_key;
+static struct lock_class_key ah_lock_key;
+static struct lock_class_key srq_lock_key;
+
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
 		(udata)->inbuf  = (void __user *) (ibuf);		\
@@ -76,12 +83,13 @@
  */
 
 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
-		      struct ib_ucontext *context)
+		      struct ib_ucontext *context, struct lock_class_key *key)
 {
 	uobj->user_handle = user_handle;
 	uobj->context     = context;
 	kref_init(&uobj->ref);
 	init_rwsem(&uobj->mutex);
+	lockdep_set_class(&uobj->mutex, key);
 	uobj->live        = 0;
 }
 
@@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, 0, file->ucontext);
+	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
 	down_write(&uobj->mutex);
 
 	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, 0, file->ucontext);
+	init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
 	down_write(&obj->uobject.mutex);
 
 	/*
@@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
 	down_write(&obj->uobject.mutex);
 
 	if (cmd.comp_channel >= 0) {
@@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
 	down_write(&obj->uevent.uobject.mutex);
 
+	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
 	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
 	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
-	rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext);
-	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
+	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
+		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
 
 	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
 		ret = -EINVAL;
@@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 
 	put_pd_read(pd);
 	put_cq_read(scq);
-	put_cq_read(rcq);
+	if (rcq != scq)
+		put_cq_read(rcq);
 	if (srq)
 		put_srq_read(srq);
 
@@ -1150,7 +1160,7 @@ err_put:
 		put_pd_read(pd);
 	if (scq)
 		put_cq_read(scq);
-	if (rcq)
+	if (rcq && rcq != scq)
 		put_cq_read(rcq);
 	if (srq)
 		put_srq_read(srq);
@@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, cmd.user_handle, file->ucontext);
+	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
 	down_write(&uobj->mutex);
 
 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1775,7 +1785,7 @@
 	ah = ib_create_ah(pd, &attr);
 	if (IS_ERR(ah)) {
 		ret = PTR_ERR(ah);
-		goto err;
+		goto err_put;
 	}
 
 	ah->uobject  = uobj;
@@ -1811,6 +1821,9 @@ err_copy:
 err_destroy:
 	ib_destroy_ah(ah);
 
+err_put:
+	put_pd_read(pd);
+
 err:
 	put_uobj_write(uobj);
 	return ret;
@@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
 	down_write(&obj->uobject.mutex);
 
 	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1984,7 +1997,7 @@
 	srq = pd->device->create_srq(pd, &attr, &udata);
 	if (IS_ERR(srq)) {
 		ret = PTR_ERR(srq);
-		goto err;
+		goto err_put;
 	}
 
 	srq->device       = pd->device;
@@ -2029,6 +2042,9 @@ err_copy:
 err_destroy:
 	ib_destroy_srq(srq);
 
+err_put:
+	put_pd_read(pd);
+
 err:
 	put_uobj_write(&obj->uobject);
 	return ret;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e725cccc7cde..4e16314e8e6d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -122,7 +122,7 @@ static void ib_uverbs_release_dev(struct kref *ref)
 	struct ib_uverbs_device *dev =
 		container_of(ref, struct ib_uverbs_device, ref);
 
-	kfree(dev);
+	complete(&dev->comp);
 }
 
 void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
@@ -740,6 +740,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 		return;
 
 	kref_init(&uverbs_dev->ref);
+	init_completion(&uverbs_dev->comp);
 
 	spin_lock(&map_lock);
 	uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -793,6 +794,8 @@ err_cdev:
 
 err:
 	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+	wait_for_completion(&uverbs_dev->comp);
+	kfree(uverbs_dev);
 	return;
 }
 
@@ -812,7 +815,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
 	spin_unlock(&map_lock);
 
 	clear_bit(uverbs_dev->devnum, dev_map);
+
 	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+	wait_for_completion(&uverbs_dev->comp);
+	kfree(uverbs_dev);
 }
 
 static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,