author		Herbert Xu <herbert@gondor.apana.org.au>	2008-10-07 18:50:03 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-07 18:50:03 -0400
commit		58ec3b4db9eb5a28e3aec5f407a54e28f7039c19 (patch)
tree		224232795e1a2d5966c27b74008714881682644b /net
parent		742201e7baf66c64995fdd033d706220e6208fab (diff)
net: Fix netdev_run_todo dead-lock
Benjamin Thery tracked down a bug that explains many instances of the error:

    unregister_netdevice: waiting for %s to become free. Usage count = %d

It turns out that netdev_run_todo can dead-lock with itself if a second
instance of it is run in a thread that will then free a reference to the
device waited on by the first instance.

The problem is really quite silly. We were trying to create parallelism
where none was required. Since netdev_run_todo always follows an RTNL
section, and todo tasks can only be added with the RTNL held, by definition
you only need to wait for the very tasks you added yourself, and you are
done. There is no need for a second mutex or spinlock.

This is exactly what the following patch does.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
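To see why snapshotting under the lock suffices, here is a minimal userspace
model of the fixed scheme. This is an illustrative sketch in plain C with
pthreads, not kernel code; every name in it (set_todo, run_todo, worker) is a
made-up stand-in for its kernel counterpart, as noted in the comments.

/*
 * Minimal userspace model of the fixed scheme.  Invariant being
 * modelled: todo entries may only be queued while the "rtnl" mutex is
 * held, and run_todo() snapshots the list *before* dropping that mutex.
 * Each thread therefore processes exactly the entries queued during its
 * own locked section, so no extra mutex or spinlock is needed and no
 * thread ever waits on another thread's todos.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct todo {
	struct todo *next;
	int id;
};

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* ~ rtnl_mutex */
static struct todo *todo_list;                           /* ~ net_todo_list */

/* ~ net_set_todo(): caller holds "rtnl", which already serializes us */
static void set_todo(int id)
{
	struct todo *t = malloc(sizeof(*t));

	t->id = id;
	t->next = todo_list;
	todo_list = t;
}

/* ~ netdev_run_todo(): snapshot under the lock, then drop it and work */
static void run_todo(void)
{
	struct todo *list = todo_list;	/* ~ list_replace_init() */

	todo_list = NULL;
	pthread_mutex_unlock(&rtnl);	/* ~ __rtnl_unlock() */

	while (list) {
		struct todo *t = list;

		list = list->next;
		printf("thread %lu processed todo %d\n",
		       (unsigned long)pthread_self(), t->id);
		free(t);
	}
}

static void *worker(void *arg)
{
	int base = *(int *)arg;

	pthread_mutex_lock(&rtnl);	/* ~ rtnl_lock() */
	set_todo(base);
	set_todo(base + 1);
	run_todo();			/* ~ rtnl_unlock(): drops the lock for us */
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int base_a = 10, base_b = 20;

	pthread_create(&a, NULL, worker, &base_a);
	pthread_create(&b, NULL, worker, &base_b);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with cc -pthread. Each thread prints only the todo ids it queued itself,
which is exactly the property the patch relies on: entries are only added
under the lock, so a snapshot taken before unlocking can never contain another
thread's work.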
Diffstat (limited to 'net')
 -rw-r--r--  net/core/dev.c        | 27 ++++++---------------------
 -rw-r--r--  net/core/rtnetlink.c  |  2 +-
 2 files changed, 7 insertions(+), 22 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index fd992c0f2717..0ae08d3f57e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3812,14 +3812,11 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
 static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
-	spin_lock(&net_todo_list_lock);
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	spin_unlock(&net_todo_list_lock);
 }
 
 static void rollback_registered(struct net_device *dev)
@@ -4146,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
  *	free_netdev(y1);
  *	free_netdev(y2);
  *
- *	We are invoked by rtnl_unlock() after it drops the semaphore.
+ *	We are invoked by rtnl_unlock().
  *	This allows us to deal with problems:
  *	1) We can delete sysfs objects which invoke hotplug
  *	   without deadlocking with linkwatch via keventd.
  *	2) Since we run with the RTNL semaphore not held, we can sleep
  *	   safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ *	We must not return until all unregister events added during
+ *	the interval the lock was held have been completed.
  */
-static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list;
 
-	/* Need to guard against multiple cpu's getting out of order. */
-	mutex_lock(&net_todo_run_mutex);
-
-	/* Not safe to do outside the semaphore.  We must not return
-	 * until all unregister events invoked by the local processor
-	 * have been completed (either by this todo run, or one on
-	 * another cpu).
-	 */
-	if (list_empty(&net_todo_list))
-		goto out;
-
 	/* Snapshot list, allow later requests */
-	spin_lock(&net_todo_list_lock);
 	list_replace_init(&net_todo_list, &list);
-	spin_unlock(&net_todo_list_lock);
+
+	__rtnl_unlock();
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
@@ -4204,9 +4192,6 @@ void netdev_run_todo(void)
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
-
-out:
-	mutex_unlock(&net_todo_run_mutex);
 }
 
 static struct net_device_stats *internal_stats(struct net_device *dev)
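The ordering inside the rewritten function is the heart of the fix: the
snapshot is taken while the RTNL is still held, and the mutex is only dropped
afterwards so that netdev_wait_allrefs() can sleep. An abridged skeleton of
the post-patch netdev_run_todo() (loop body elided):

void netdev_run_todo(void)
{
	struct list_head list;

	/* Still under the RTNL here: nothing can queue entries
	 * concurrently, so this snapshot holds exactly the todos
	 * added during our own RTNL section. */
	list_replace_init(&net_todo_list, &list);

	/* Only now drop the RTNL; a later RTNL section will queue
	 * and run its own entries via its own rtnl_unlock(). */
	__rtnl_unlock();

	while (!list_empty(&list)) {
		/* ... unregister, wait for refcnt to drop, free ... */
	}
}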
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b36341..d6381c2a4693 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -73,7 +73,7 @@ void __rtnl_unlock(void)
 
 void rtnl_unlock(void)
 {
-	mutex_unlock(&rtnl_mutex);
+	/* This fellow will unlock it for us. */
 	netdev_run_todo();
 }
 
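For callers nothing changes syntactically, but the guarantee is now enforced
by construction: rtnl_unlock() leaves the actual mutex drop to
netdev_run_todo(), which performs it via __rtnl_unlock() only after
snapshotting the todo list. A hypothetical caller illustrating the resulting
contract (example_teardown is illustrative and not part of this patch; the
APIs it calls are real):

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>

/* Hypothetical teardown path showing the contract after this patch. */
static void example_teardown(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);	/* queued on net_todo_list via net_set_todo() */
	rtnl_unlock();			/* snapshots and runs the todo list: waits
					 * for dev's refcount to drop, then frees it */
}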