diff options
author | Sean Hefty <mshefty@ichips.intel.com> | 2006-05-12 17:57:52 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2006-05-12 17:57:52 -0400 |
commit | 1b52fa98edd1c3e663ea4a06519e3d20976084a8 (patch) | |
tree | 178d5fd1fe2230b39f49cd36f481024e49878eb1 /drivers/infiniband/core/cm.c | |
parent | 6f4bb3d8205d943acafa2f536f37131777524b67 (diff) |
IB: refcount race fixes
Fix race condition during destruction calls to avoid possibility of
accessing object after it has been freed. Instead of waking up a wait
queue directly, which is susceptible to a race where the object is
freed between the reference count going to 0 and the wake_up(), use a
completion to wait in the function doing the freeing.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/core/cm.c')
-rw-r--r-- | drivers/infiniband/core/cm.c | 12 |
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 7cfedb8d9bc..86fee43502c 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,8 @@
  *
  * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
  */
+
+#include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/idr.h>
@@ -122,7 +124,7 @@ struct cm_id_private {
 	struct rb_node service_node;
 	struct rb_node sidr_id_node;
 	spinlock_t lock;	/* Do not acquire inside cm.lock */
-	wait_queue_head_t wait;
+	struct completion comp;
 	atomic_t refcount;
 
 	struct ib_mad_send_buf *msg;
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
 	if (atomic_dec_and_test(&cm_id_priv->refcount))
-		wake_up(&cm_id_priv->wait);
+		complete(&cm_id_priv->comp);
 }
 
 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 		goto error;
 
 	spin_lock_init(&cm_id_priv->lock);
-	init_waitqueue_head(&cm_id_priv->wait);
+	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
@@ -724,8 +726,8 @@ retest:
 	}
 
 	cm_free_id(cm_id->local_id);
-	atomic_dec(&cm_id_priv->refcount);
-	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
+	cm_deref_id(cm_id_priv);
+	wait_for_completion(&cm_id_priv->comp);
 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
 		cm_free_work(work);
 	if (cm_id_priv->private_data && cm_id_priv->private_data_len)