author		Herbert Xu <herbert@gondor.apana.org.au>	2008-10-07 18:50:03 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-07 18:50:03 -0400
commit		58ec3b4db9eb5a28e3aec5f407a54e28f7039c19 (patch)
tree		224232795e1a2d5966c27b74008714881682644b /net/core
parent		742201e7baf66c64995fdd033d706220e6208fab (diff)
net: Fix netdev_run_todo dead-lock
Benjamin Thery tracked down a bug that explains many instances
of the error
unregister_netdevice: waiting for %s to become free. Usage count = %d
It turns out that netdev_run_todo can dead-lock with itself if
a second instance of it is run in a thread that will then free
a reference to the device waited on by the first instance.
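To make the interleaving concrete, here is a sketch of the self-deadlock (the two-thread timeline is illustrative rather than taken from a real trace, and assumes the pre-patch net_todo_run_mutex serialization removed below):

/*
 *  Thread A                              Thread B (holds a ref on dev)
 *  --------                              -----------------------------
 *  rtnl_unlock()
 *    netdev_run_todo()
 *      mutex_lock(&net_todo_run_mutex)
 *      netdev_wait_allrefs(dev)
 *        loops until dev's refcnt        rtnl_unlock()
 *        drops to zero                     netdev_run_todo()
 *                                            mutex_lock(&net_todo_run_mutex)
 *                                            blocks: A holds the mutex, yet A
 *                                            is waiting for the reference B
 *                                            would only drop after its todo
 *                                            run completes
 */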
The problem is really quite silly. We were trying to create
parallelism where none was required. As netdev_run_todo always
follows an RTNL section, and todo tasks can only be added with
the RTNL held, by definition you only need to wait for the very
entries you've added yourself and be done with it.
There is no need for a second mutex or spinlock.
This is exactly what the following patch does.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
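Condensed, the post-patch control flow is the following (a sketch of the two functions as they read after this patch, with the list walk elided):

void rtnl_unlock(void)
{
	/* netdev_run_todo() drops rtnl_mutex for us via __rtnl_unlock(). */
	netdev_run_todo();
}

void netdev_run_todo(void)
{
	struct list_head list;

	/* Still under the RTNL: net_todo_list can only contain entries
	 * queued by this very RTNL section, so snapshotting it into a
	 * private list needs no extra lock. */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* ... walk 'list' and finish unregistering each device; anything
	 * queued from now on belongs to a later RTNL section ... */
}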
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c		27
-rw-r--r--	net/core/rtnetlink.c	 2
2 files changed, 7 insertions(+), 22 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index fd992c0f2717..0ae08d3f57e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3812,14 +3812,11 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
 static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
-	spin_lock(&net_todo_list_lock);
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	spin_unlock(&net_todo_list_lock);
 }
 
 static void rollback_registered(struct net_device *dev)
@@ -4146,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
  * free_netdev(y1);
  * free_netdev(y2);
  *
- * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * We are invoked by rtnl_unlock().
  * This allows us to deal with problems:
  * 1) We can delete sysfs objects which invoke hotplug
  *    without deadlocking with linkwatch via keventd.
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  *    safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
  */
-static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list;
 
-	/* Need to guard against multiple cpu's getting out of order. */
-	mutex_lock(&net_todo_run_mutex);
-
-	/* Not safe to do outside the semaphore.  We must not return
-	 * until all unregister events invoked by the local processor
-	 * have been completed (either by this todo run, or one on
-	 * another cpu).
-	 */
-	if (list_empty(&net_todo_list))
-		goto out;
-
 	/* Snapshot list, allow later requests */
-	spin_lock(&net_todo_list_lock);
 	list_replace_init(&net_todo_list, &list);
-	spin_unlock(&net_todo_list_lock);
+
+	__rtnl_unlock();
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
@@ -4204,9 +4192,6 @@ void netdev_run_todo(void)
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
-
-out:
-	mutex_unlock(&net_todo_run_mutex);
 }
 
 static struct net_device_stats *internal_stats(struct net_device *dev)
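The new __rtnl_unlock() call relies on the plain-unlock variant defined just above rtnl_unlock() in net/core/rtnetlink.c; it drops rtnl_mutex without running the todo list, which is what lets netdev_run_todo() take over the unlock. For reference, a sketch of that helper as it stands in this tree (reconstructed from the hunk header below, not part of the patch):

void __rtnl_unlock(void)
{
	mutex_unlock(&rtnl_mutex);
}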
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b36341..d6381c2a4693 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -73,7 +73,7 @@ void __rtnl_unlock(void)
 
 void rtnl_unlock(void)
 {
-	mutex_unlock(&rtnl_mutex);
+	/* This fellow will unlock it for us. */
 	netdev_run_todo();
 }
 
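From a driver's point of view the contract is unchanged but now deadlock-free. A hypothetical teardown path for illustration (not taken from this patch):

	rtnl_lock();
	unregister_netdevice(dev);	/* queues dev on net_todo_list */
	rtnl_unlock();			/* netdev_run_todo() drops the RTNL,
					 * waits for dev's refcnt to hit zero
					 * and frees the device */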