author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 12:56:11 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-23 12:56:11 -0400
commit    0b776eb5426752d4e53354ac89e3710d857e09a7 (patch)
tree      1eebeeaabab90de5834b32e72d2e259dc8a4a635 /drivers/infiniband/core/cma.c
parent    0d6810091cdbd05efeb31654c6a41a6cbdfdd2c8 (diff)
parent    77109cc2823f025ccd66ebd9b88fbab90437b2d8 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  mlx4_core: Increase command timeout for INIT_HCA to 10 seconds
  IPoIB/cm: Use common CQ for CM send completions
  IB/uverbs: Fix checking of userspace object ownership
  IB/mlx4: Sanity check userspace send queue sizes
  IPoIB: Rewrite "if (!likely(...))" as "if (unlikely(!(...)))"
  IB/ehca: Enable large page MRs by default
  IB/ehca: Change meaning of hca_cap_mr_pgsize
  IB/ehca: Fix ehca_encode_hwpage_size() and alloc_fmr()
  IB/ehca: Fix masking error in {,re}reg_phys_mr()
  IB/ehca: Supply QP token for SRQ base QPs
  IPoIB: Use round_jiffies() for ah_reap_task
  RDMA/cma: Fix deadlock destroying listen requests
  RDMA/cma: Add locking around QP accesses
  IB/mthca: Avoid alignment traps when writing doorbells
  mlx4_core: Kill mlx4_write64_raw()
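The two RDMA/cma fixes in this batch are the ones that touch cma.c. The QP-locking fix serializes every access to id_priv->id.qp behind a new qp_mutex, so rdma_destroy_qp() can clear the pointer without racing against connection-state transitions. A minimal sketch of the pattern follows; cma_modify_qp_example is a hypothetical condensation of the three cma_modify_qp_* helpers in the diff below, not a function in the patch:

	/* Sketch of the serialization pattern this merge introduces: every
	 * access to id_priv->id.qp takes qp_mutex, so rdma_destroy_qp()
	 * can safely clear the pointer while other threads modify the QP.
	 */
	static int cma_modify_qp_example(struct rdma_id_private *id_priv,
					 enum ib_qp_state new_state)
	{
		struct ib_qp_attr qp_attr;
		int qp_attr_mask, ret;

		mutex_lock(&id_priv->qp_mutex);
		if (!id_priv->id.qp) {
			/* QP already destroyed: treat as a no-op, not an error */
			ret = 0;
			goto out;
		}

		qp_attr.qp_state = new_state;
		ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
		if (!ret)
			ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	out:
		mutex_unlock(&id_priv->qp_mutex);
		return ret;
	}

Checking for a NULL QP under the mutex and returning 0 keeps the destroy/modify race benign: a concurrent destroy simply turns the state transition into a no-op.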
Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c  160
1 file changed, 83 insertions, 77 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d08fb30768bc..0751697ef984 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -114,13 +114,16 @@ struct rdma_id_private {
 
 	struct rdma_bind_list	*bind_list;
 	struct hlist_node	node;
-	struct list_head	list;
-	struct list_head	listen_list;
+	struct list_head	list; /* listen_any_list or cma_device.list */
+	struct list_head	listen_list; /* per device listens */
 	struct cma_device	*cma_dev;
 	struct list_head	mc_list;
 
+	int			internal_id;
 	enum cma_state		state;
 	spinlock_t		lock;
+	struct mutex		qp_mutex;
+
 	struct completion	comp;
 	atomic_t		refcount;
 	wait_queue_head_t	wait_remove;
@@ -389,6 +392,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
 	spin_lock_init(&id_priv->lock);
+	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
 	atomic_set(&id_priv->refcount, 1);
 	init_waitqueue_head(&id_priv->wait_remove);
@@ -474,61 +478,86 @@ EXPORT_SYMBOL(rdma_create_qp);
 
 void rdma_destroy_qp(struct rdma_cm_id *id)
 {
-	ib_destroy_qp(id->qp);
+	struct rdma_id_private *id_priv;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	mutex_lock(&id_priv->qp_mutex);
+	ib_destroy_qp(id_priv->id.qp);
+	id_priv->id.qp = NULL;
+	mutex_unlock(&id_priv->qp_mutex);
 }
 EXPORT_SYMBOL(rdma_destroy_qp);
 
-static int cma_modify_qp_rtr(struct rdma_cm_id *id)
+static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
 
-	if (!id->qp)
-		return 0;
+	mutex_lock(&id_priv->qp_mutex);
+	if (!id_priv->id.qp) {
+		ret = 0;
+		goto out;
+	}
 
 	/* Need to update QP attributes from default values. */
 	qp_attr.qp_state = IB_QPS_INIT;
-	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
+	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
+	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
 	if (ret)
-		return ret;
+		goto out;
 
 	qp_attr.qp_state = IB_QPS_RTR;
-	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
+	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
 	if (ret)
-		return ret;
+		goto out;
 
-	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
+	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+out:
+	mutex_unlock(&id_priv->qp_mutex);
+	return ret;
 }
 
-static int cma_modify_qp_rts(struct rdma_cm_id *id)
+static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
 
-	if (!id->qp)
-		return 0;
+	mutex_lock(&id_priv->qp_mutex);
+	if (!id_priv->id.qp) {
+		ret = 0;
+		goto out;
+	}
 
 	qp_attr.qp_state = IB_QPS_RTS;
-	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
+	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
 	if (ret)
-		return ret;
+		goto out;
 
-	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
+	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+out:
+	mutex_unlock(&id_priv->qp_mutex);
+	return ret;
 }
 
-static int cma_modify_qp_err(struct rdma_cm_id *id)
+static int cma_modify_qp_err(struct rdma_id_private *id_priv)
 {
 	struct ib_qp_attr qp_attr;
+	int ret;
 
-	if (!id->qp)
-		return 0;
+	mutex_lock(&id_priv->qp_mutex);
+	if (!id_priv->id.qp) {
+		ret = 0;
+		goto out;
+	}
 
 	qp_attr.qp_state = IB_QPS_ERR;
-	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
+	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
+out:
+	mutex_unlock(&id_priv->qp_mutex);
+	return ret;
 }
 
 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
@@ -717,50 +746,27 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
 	}
 }
 
-static inline int cma_internal_listen(struct rdma_id_private *id_priv)
-{
-	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
-	       cma_any_addr(&id_priv->id.route.addr.src_addr);
-}
-
-static void cma_destroy_listen(struct rdma_id_private *id_priv)
-{
-	cma_exch(id_priv, CMA_DESTROYING);
-
-	if (id_priv->cma_dev) {
-		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-		case RDMA_TRANSPORT_IB:
-			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
-				ib_destroy_cm_id(id_priv->cm_id.ib);
-			break;
-		case RDMA_TRANSPORT_IWARP:
-			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
-				iw_destroy_cm_id(id_priv->cm_id.iw);
-			break;
-		default:
-			break;
-		}
-		cma_detach_from_dev(id_priv);
-	}
-	list_del(&id_priv->listen_list);
-
-	cma_deref_id(id_priv);
-	wait_for_completion(&id_priv->comp);
-
-	kfree(id_priv);
-}
-
 static void cma_cancel_listens(struct rdma_id_private *id_priv)
 {
 	struct rdma_id_private *dev_id_priv;
 
+	/*
+	 * Remove from listen_any_list to prevent added devices from spawning
+	 * additional listen requests.
+	 */
 	mutex_lock(&lock);
 	list_del(&id_priv->list);
 
 	while (!list_empty(&id_priv->listen_list)) {
 		dev_id_priv = list_entry(id_priv->listen_list.next,
 					 struct rdma_id_private, listen_list);
-		cma_destroy_listen(dev_id_priv);
+		/* sync with device removal to avoid duplicate destruction */
+		list_del_init(&dev_id_priv->list);
+		list_del(&dev_id_priv->listen_list);
+		mutex_unlock(&lock);
+
+		rdma_destroy_id(&dev_id_priv->id);
+		mutex_lock(&lock);
 	}
 	mutex_unlock(&lock);
 }
@@ -848,6 +854,9 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	cma_deref_id(id_priv);
 	wait_for_completion(&id_priv->comp);
 
+	if (id_priv->internal_id)
+		cma_deref_id(id_priv->id.context);
+
 	kfree(id_priv->id.route.path_rec);
 	kfree(id_priv);
 }
@@ -857,11 +866,11 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
 {
 	int ret;
 
-	ret = cma_modify_qp_rtr(&id_priv->id);
+	ret = cma_modify_qp_rtr(id_priv);
 	if (ret)
 		goto reject;
 
-	ret = cma_modify_qp_rts(&id_priv->id);
+	ret = cma_modify_qp_rts(id_priv);
 	if (ret)
 		goto reject;
 
@@ -871,7 +880,7 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
 
 	return 0;
 reject:
-	cma_modify_qp_err(&id_priv->id);
+	cma_modify_qp_err(id_priv);
 	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
 		       NULL, 0, NULL, 0);
 	return ret;
@@ -947,7 +956,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		/* ignore event */
 		goto out;
 	case IB_CM_REJ_RECEIVED:
-		cma_modify_qp_err(&id_priv->id);
+		cma_modify_qp_err(id_priv);
 		event.status = ib_event->param.rej_rcvd.reason;
 		event.event = RDMA_CM_EVENT_REJECTED;
 		event.param.conn.private_data = ib_event->private_data;
@@ -1404,14 +1413,13 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+	atomic_inc(&id_priv->refcount);
+	dev_id_priv->internal_id = 1;
 
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
-		goto err;
-
-	return;
-err:
-	cma_destroy_listen(dev_id_priv);
+		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
+		       "listening on device %s", ret, cma_dev->device->name);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -2264,7 +2272,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
 	cm_id->remote_addr = *sin;
 
-	ret = cma_modify_qp_rtr(&id_priv->id);
+	ret = cma_modify_qp_rtr(id_priv);
 	if (ret)
 		goto out;
 
@@ -2331,7 +2339,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	int qp_attr_mask, ret;
 
 	if (id_priv->id.qp) {
-		ret = cma_modify_qp_rtr(&id_priv->id);
+		ret = cma_modify_qp_rtr(id_priv);
 		if (ret)
 			goto out;
 
@@ -2370,7 +2378,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
 	struct iw_cm_conn_param iw_param;
 	int ret;
 
-	ret = cma_modify_qp_rtr(&id_priv->id);
+	ret = cma_modify_qp_rtr(id_priv);
 	if (ret)
 		return ret;
 
@@ -2442,7 +2450,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 reject:
-	cma_modify_qp_err(id);
+	cma_modify_qp_err(id_priv);
 	rdma_reject(id, NULL, 0);
 	return ret;
 }
@@ -2512,7 +2520,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_modify_qp_err(id);
+		ret = cma_modify_qp_err(id_priv);
 		if (ret)
 			goto out;
 		/* Initiate or respond to a disconnect. */
@@ -2543,9 +2551,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
 		return 0;
 
+	mutex_lock(&id_priv->qp_mutex);
 	if (!status && id_priv->id.qp)
 		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
 					 multicast->rec.mlid);
+	mutex_unlock(&id_priv->qp_mutex);
 
 	memset(&event, 0, sizeof event);
 	event.status = status;
@@ -2757,16 +2767,12 @@ static void cma_process_remove(struct cma_device *cma_dev)
 		id_priv = list_entry(cma_dev->id_list.next,
 				     struct rdma_id_private, list);
 
-		if (cma_internal_listen(id_priv)) {
-			cma_destroy_listen(id_priv);
-			continue;
-		}
-
+		list_del(&id_priv->listen_list);
 		list_del_init(&id_priv->list);
 		atomic_inc(&id_priv->refcount);
 		mutex_unlock(&lock);
 
-		ret = cma_remove_id_dev(id_priv);
+		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
 		cma_deref_id(id_priv);
 		if (ret)
 			rdma_destroy_id(&id_priv->id);
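
The listen-deadlock fix visible in cma_cancel_listens() above follows the usual drop-the-lock-before-blocking pattern: each per-device listen is unlinked from both lists while the global mutex is held, and the mutex is then released around rdma_destroy_id(), which blocks on the id's completion and may itself need the mutex. A condensed sketch of that loop, with names taken from the diff and error handling elided:

	mutex_lock(&lock);
	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* unlink under the lock so device removal can't also destroy it */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);	/* rdma_destroy_id() may sleep */
		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);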