author     Sean Hefty <sean.hefty@intel.com>          2007-10-09 14:12:34 -0400
committer  Roland Dreier <rolandd@cisco.com>          2007-10-16 15:25:49 -0400
commit     d02d1f5359e795bac9a4461698521680cddd782b (patch)
tree       f90595e51f9f006155bad6a68b67f5ee52c32589 /drivers/infiniband
parent     c5483388bb4d771007ef36478db038e07922a020 (diff)
RDMA/cma: Fix deadlock destroying listen requests
Fix a deadlock condition reported by Kanoj Sarcar <kanoj@netxen.com>.
The deadlock occurs when a connection request arrives at the same
time that a wildcard listen is being destroyed.
A wildcard listen maintains per device listen requests for each
RDMA device in the system. The per device listens are automatically
added and removed when RDMA devices are inserted or removed from
the system.
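For context, the fan-out looks roughly like this in cma.c at the time of
this patch (paraphrased for illustration; this function itself is unchanged
by the patch, which instead modifies cma_listen_on_dev(), the helper it
calls for each device):

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	/* the wildcard id sits on listen_any_list; every existing device
	 * gets its own listen id, chained onto id_priv->listen_list by
	 * cma_listen_on_dev() */
	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}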
When a wildcard listen is destroyed, rdma_destroy_id() acquires
the rdma_cm's device mutex ('lock') to protect against hot-plug
events adding or removing per device listens. While still holding
the device mutex, it then tries to destroy the per device listens
by calling ib_destroy_cm_id() or iw_destroy_cm_id().

However, if the underlying IB or iWARP CM reports a connection
request at the same time, the rdma_cm callback function tries to
acquire the same device mutex. ib_destroy_cm_id() and
iw_destroy_cm_id() block until any running callback returns, but
that callback is itself blocked waiting for the device mutex, so
the two threads deadlock.
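The same lock-versus-callback cycle can be reproduced in a few lines of
userspace C, which may make the hang easier to see. This is an illustration
only: the pthread mutex stands in for the rdma_cm device mutex, the
"callback" thread for the CM connect-request callback, and destroy_cm_id()
models ib_destroy_cm_id()'s wait for callbacks to finish; none of these
names come from the kernel.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* "device mutex" */
static pthread_t callback_thread;

static void *callback(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);   /* blocks forever: destroyer holds it */
	printf("connection request handled\n");
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void destroy_cm_id(void)
{
	/* like ib_destroy_cm_id(): wait for the callback thread to finish */
	pthread_join(callback_thread, NULL);
}

int main(void)
{
	pthread_mutex_lock(&lock);       /* rdma_destroy_id() takes 'lock'  */
	pthread_create(&callback_thread, NULL, callback, NULL);
					 /* connection request arrives      */
	destroy_cm_id();                 /* waits on the callback: deadlock */
	pthread_mutex_unlock(&lock);
	return 0;
}

Built with gcc -pthread, this program hangs in destroy_cm_id() by design,
which is the userspace analogue of the reported hang.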
Fix this by reworking how per device listens are destroyed: drop the
device mutex before destroying each per device listen, and call
rdma_destroy_id() in place of cma_destroy_listen(). Additional
synchronization is added to handle device hot-plug events and to
ensure that an id is not destroyed twice.
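The pieces providing that extra synchronization are small and all appear in
the diff below; they are collected here with explanatory comments (the
comments are mine, the statements are taken from the patch):

/* cma_listen_on_dev(): each per device listen pins its parent wildcard id
 * (stored as the new id's context) and is marked as internally created. */
	atomic_inc(&id_priv->refcount);
	dev_id_priv->internal_id = 1;

/* cma_process_remove(): on device hot-removal, internal listens skip
 * cma_remove_id_dev(); forcing ret = 1 tears them down through
 * rdma_destroy_id() outside the device mutex instead. */
	ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);

/* rdma_destroy_id(): once an internal listen is fully destroyed, it drops
 * the reference it took on its parent wildcard id. */
	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);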
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c  70
1 file changed, 23 insertions(+), 47 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 01ae052ac3f4..ee946cc2576b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -114,11 +114,12 @@ struct rdma_id_private {
 
 	struct rdma_bind_list	*bind_list;
 	struct hlist_node	node;
-	struct list_head	list;
-	struct list_head	listen_list;
+	struct list_head	list; /* listen_any_list or cma_device.list */
+	struct list_head	listen_list; /* per device listens */
 	struct cma_device	*cma_dev;
 	struct list_head	mc_list;
 
+	int			internal_id;
 	enum cma_state		state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
@@ -745,50 +746,27 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
 	}
 }
 
-static inline int cma_internal_listen(struct rdma_id_private *id_priv)
-{
-	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
-	       cma_any_addr(&id_priv->id.route.addr.src_addr);
-}
-
-static void cma_destroy_listen(struct rdma_id_private *id_priv)
-{
-	cma_exch(id_priv, CMA_DESTROYING);
-
-	if (id_priv->cma_dev) {
-		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-		case RDMA_TRANSPORT_IB:
-			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
-				ib_destroy_cm_id(id_priv->cm_id.ib);
-			break;
-		case RDMA_TRANSPORT_IWARP:
-			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
-				iw_destroy_cm_id(id_priv->cm_id.iw);
-			break;
-		default:
-			break;
-		}
-		cma_detach_from_dev(id_priv);
-	}
-	list_del(&id_priv->listen_list);
-
-	cma_deref_id(id_priv);
-	wait_for_completion(&id_priv->comp);
-
-	kfree(id_priv);
-}
-
 static void cma_cancel_listens(struct rdma_id_private *id_priv)
 {
 	struct rdma_id_private *dev_id_priv;
 
+	/*
+	 * Remove from listen_any_list to prevent added devices from spawning
+	 * additional listen requests.
+	 */
 	mutex_lock(&lock);
 	list_del(&id_priv->list);
 
 	while (!list_empty(&id_priv->listen_list)) {
 		dev_id_priv = list_entry(id_priv->listen_list.next,
 					 struct rdma_id_private, listen_list);
-		cma_destroy_listen(dev_id_priv);
+		/* sync with device removal to avoid duplicate destruction */
+		list_del_init(&dev_id_priv->list);
+		list_del(&dev_id_priv->listen_list);
+		mutex_unlock(&lock);
+
+		rdma_destroy_id(&dev_id_priv->id);
+		mutex_lock(&lock);
 	}
 	mutex_unlock(&lock);
 }
@@ -876,6 +854,9 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	cma_deref_id(id_priv);
 	wait_for_completion(&id_priv->comp);
 
+	if (id_priv->internal_id)
+		cma_deref_id(id_priv->id.context);
+
 	kfree(id_priv->id.route.path_rec);
 	kfree(id_priv);
 }
@@ -1432,14 +1413,13 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+	atomic_inc(&id_priv->refcount);
+	dev_id_priv->internal_id = 1;
 
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
-		goto err;
-
-	return;
-err:
-	cma_destroy_listen(dev_id_priv);
+		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
+		       "listening on device %s", ret, cma_dev->device->name);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -2787,16 +2767,12 @@ static void cma_process_remove(struct cma_device *cma_dev)
 		id_priv = list_entry(cma_dev->id_list.next,
 				     struct rdma_id_private, list);
 
-		if (cma_internal_listen(id_priv)) {
-			cma_destroy_listen(id_priv);
-			continue;
-		}
-
+		list_del(&id_priv->listen_list);
 		list_del_init(&id_priv->list);
 		atomic_inc(&id_priv->refcount);
 		mutex_unlock(&lock);
 
-		ret = cma_remove_id_dev(id_priv);
+		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
 		cma_deref_id(id_priv);
 		if (ret)
 			rdma_destroy_id(&id_priv->id);