 net/core/dev.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e8eb2b47834..0ae08d3f57e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2918,6 +2918,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
 	return 0;
 }
 
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+	if (dev->flags & IFF_UP && dev->change_rx_flags)
+		dev->change_rx_flags(dev, flags);
+}
+
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
 	unsigned short old_flags = dev->flags;
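[Note: the helper added above centralizes both the NULL test on dev->change_rx_flags and a new IFF_UP check, so the call sites in the hunks below no longer test the callback themselves and drivers are no longer notified while the device is down. A rough sketch of a driver-side callback under that assumption; foo_priv and the foo_hw_* helpers are hypothetical names, only netdev_priv(), dev->flags and the IFF_* constants are real kernel API:]

#include <linux/netdevice.h>

/* Hypothetical driver callback, for illustration only.  Because the core
 * now reaches it via dev_change_rx_flags(), it runs only while the
 * interface is up, so the hardware filter can be written unconditionally.
 * The flags argument names the flag that changed; the current value is
 * read back from dev->flags, as the call sites below do.
 */
static void foo_change_rx_flags(struct net_device *dev, int flags)
{
	struct foo_priv *priv = netdev_priv(dev);	/* foo_priv is made up */

	if (flags & IFF_PROMISC)
		foo_hw_set_promisc(priv, dev->flags & IFF_PROMISC);
	if (flags & IFF_ALLMULTI)
		foo_hw_set_allmulti(priv, dev->flags & IFF_ALLMULTI);
}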
@@ -2955,8 +2961,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 			current->uid, current->gid,
 			audit_get_sessionid(current));
 
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_PROMISC);
+		dev_change_rx_flags(dev, IFF_PROMISC);
 	}
 	return 0;
 }
@@ -3022,8 +3027,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
 		}
 	}
 	if (dev->flags ^ old_flags) {
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_ALLMULTI);
+		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
 	}
 	return 0;
@@ -3347,8 +3351,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 *	Load in the correct multicast list now the flags have changed.
 	 */
 
-	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
-		dev->change_rx_flags(dev, IFF_MULTICAST);
+	if ((old_flags ^ flags) & IFF_MULTICAST)
+		dev_change_rx_flags(dev, IFF_MULTICAST);
 
 	dev_set_rx_mode(dev);
 
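[Note: the rewritten condition in this hunk relies on XOR to detect that IFF_MULTICAST changed in either direction. A small standalone userspace illustration of that bit trick, using the same flag values as <linux/if.h>:]

#include <stdio.h>

#define IFF_UP		0x1
#define IFF_MULTICAST	0x1000		/* values as in <linux/if.h> */

int main(void)
{
	unsigned int old_flags = IFF_UP | IFF_MULTICAST;
	unsigned int flags = IFF_UP;	/* multicast was just turned off */

	/* XOR leaves 1-bits exactly where old and new differ, so masking
	 * with IFF_MULTICAST catches a toggle in either direction without
	 * caring what the new value is. */
	if ((old_flags ^ flags) & IFF_MULTICAST)
		printf("IFF_MULTICAST toggled, reprogram the RX filter\n");
	return 0;
}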
@@ -3808,14 +3812,11 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
 static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
-	spin_lock(&net_todo_list_lock);
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	spin_unlock(&net_todo_list_lock);
 }
 
 static void rollback_registered(struct net_device *dev)
@@ -4142,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
  *	free_netdev(y1);
  *	free_netdev(y2);
  *
- * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * We are invoked by rtnl_unlock().
  * This allows us to deal with problems:
  * 1) We can delete sysfs objects which invoke hotplug
  *    without deadlocking with linkwatch via keventd.
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  *    safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
  */
-static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list;
 
-	/* Need to guard against multiple cpu's getting out of order. */
-	mutex_lock(&net_todo_run_mutex);
-
-	/* Not safe to do outside the semaphore.  We must not return
-	 * until all unregister events invoked by the local processor
-	 * have been completed (either by this todo run, or one on
-	 * another cpu).
-	 */
-	if (list_empty(&net_todo_list))
-		goto out;
-
 	/* Snapshot list, allow later requests */
-	spin_lock(&net_todo_list_lock);
 	list_replace_init(&net_todo_list, &list);
-	spin_unlock(&net_todo_list_lock);
+
+	__rtnl_unlock();
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
@@ -4200,9 +4192,6 @@ void netdev_run_todo(void)
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
-
-out:
-	mutex_unlock(&net_todo_run_mutex);
 }
 
 static struct net_device_stats *internal_stats(struct net_device *dev)
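[Note: taken together, the last three hunks replace the private net_todo_list_lock and net_todo_run_mutex with the RTNL itself: net_set_todo() already runs under rtnl_lock(), and netdev_run_todo() is entered from rtnl_unlock() while the mutex is still held, snapshots the pending list, and only then drops the lock via __rtnl_unlock() before the sleeping wait loop. A rough userspace sketch of that snapshot-then-process pattern; the names and the pthread mutex are stand-ins, not the kernel's APIs:]

#include <pthread.h>
#include <stdio.h>

struct todo {
	struct todo *next;
	int id;
};

static pthread_mutex_t todo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct todo *todo_list;			/* stand-in for net_todo_list */

static void set_todo(struct todo *t)		/* like net_set_todo() */
{
	pthread_mutex_lock(&todo_lock);
	t->next = todo_list;
	todo_list = t;
	pthread_mutex_unlock(&todo_lock);
}

static void run_todo(void)			/* like netdev_run_todo() */
{
	struct todo *list;

	pthread_mutex_lock(&todo_lock);
	list = todo_list;			/* detach, like list_replace_init() */
	todo_list = NULL;
	pthread_mutex_unlock(&todo_lock);	/* like __rtnl_unlock() */

	while (list) {				/* work on the private snapshot;
						 * new entries can be queued meanwhile */
		struct todo *t = list;

		list = t->next;
		printf("processing todo %d\n", t->id);
	}
}

int main(void)
{
	struct todo a = { .id = 1 }, b = { .id = 2 };

	set_todo(&a);
	set_todo(&b);
	run_todo();		/* prints 2 then 1; ordering is not the point here */
	return 0;
}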