Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 49 ++++++++++++++++++++-----------------------------
 1 file changed, 20 insertions(+), 29 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e719ed29310f..0ae08d3f57e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -122,6 +122,7 @@
 #include <linux/if_arp.h>
 #include <linux/if_vlan.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
 #include <linux/jhash.h>
@@ -1667,7 +1668,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 {
 	u32 addr1, addr2, ports;
 	u32 hash, ihl;
-	u8 ip_proto;
+	u8 ip_proto = 0;
 
 	if (unlikely(!simple_tx_hashrnd_initialized)) {
 		get_random_bytes(&simple_tx_hashrnd, 4);
@@ -1676,7 +1677,8 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 
 	switch (skb->protocol) {
 	case __constant_htons(ETH_P_IP):
-		ip_proto = ip_hdr(skb)->protocol;
+		if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+			ip_proto = ip_hdr(skb)->protocol;
 		addr1 = ip_hdr(skb)->saddr;
 		addr2 = ip_hdr(skb)->daddr;
 		ihl = ip_hdr(skb)->ihl;
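Why the frag_off test: only the first fragment of a fragmented IP datagram carries the TCP/UDP header, so reading "ports" from later fragments would hash payload bytes and could spread fragments of one flow across tx queues, reordering them. Skipping the protocol lookup for every fragment (the first included) leaves ip_proto at 0, so the flow hash is computed from the addresses alone and all fragments of a datagram stay on one queue. A standalone sketch of the test; the IP_MF/IP_OFFSET values are the ones defined in <net/ip.h>, while is_fragment() is a made-up helper name:

	/* The frag_off test above, as runnable user-space code. */
	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>		/* htons() */

	#define IP_OFFSET	0x1FFF	/* fragment offset, in 8-byte units */
	#define IP_MF		0x2000	/* "more fragments" flag */

	/* frag_off_net: the raw iphdr frag_off field, network byte order */
	static int is_fragment(uint16_t frag_off_net)
	{
		return !!(frag_off_net & htons(IP_MF | IP_OFFSET));
	}

	int main(void)
	{
		printf("%d\n", is_fragment(htons(IP_MF)));	 /* 1: first fragment */
		printf("%d\n", is_fragment(htons(IP_MF | 64))); /* 1: middle fragment */
		printf("%d\n", is_fragment(htons(64)));	 /* 1: last fragment */
		printf("%d\n", is_fragment(htons(0)));		 /* 0: not fragmented */
		return 0;
	}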
@@ -2916,6 +2918,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
 	return 0;
 }
 
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+	if (dev->flags & IFF_UP && dev->change_rx_flags)
+		dev->change_rx_flags(dev, flags);
+}
+
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
 	unsigned short old_flags = dev->flags;
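The helper folds two checks into one place: callers previously open-coded the "driver supplied the hook" test, and the IFF_UP test is new, so a device that is down no longer sees rx-flag callbacks (the assumption being that its filters are resynchronized via dev_set_rx_mode() when it is brought back up). A runnable user-space mock of just that guard; the flag values match <linux/if.h>, everything else is invented for the demo:

	#include <stdio.h>

	#define IFF_UP		0x1
	#define IFF_PROMISC	0x100

	struct net_device {
		unsigned flags;
		void (*change_rx_flags)(struct net_device *dev, int flags);
	};

	/* Hook only fires when the device is up AND the driver set one. */
	static void dev_change_rx_flags(struct net_device *dev, int flags)
	{
		if (dev->flags & IFF_UP && dev->change_rx_flags)
			dev->change_rx_flags(dev, flags);
	}

	static void demo_hook(struct net_device *dev, int flags)
	{
		printf("driver saw rx-flag change 0x%x\n", flags);
	}

	int main(void)
	{
		struct net_device dev = { .flags = 0, .change_rx_flags = demo_hook };

		dev_change_rx_flags(&dev, IFF_PROMISC);	/* device down: skipped */
		dev.flags |= IFF_UP;
		dev_change_rx_flags(&dev, IFF_PROMISC);	/* device up: hook runs */
		return 0;
	}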
@@ -2953,8 +2961,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 			current->uid, current->gid,
 			audit_get_sessionid(current));
 
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_PROMISC);
+		dev_change_rx_flags(dev, IFF_PROMISC);
 	}
 	return 0;
 }
@@ -3020,8 +3027,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
 		}
 	}
 	if (dev->flags ^ old_flags) {
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_ALLMULTI);
+		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
 	}
 	return 0;
@@ -3345,8 +3351,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 * Load in the correct multicast list now the flags have changed.
 	 */
 
-	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
-		dev->change_rx_flags(dev, IFF_MULTICAST);
+	if ((old_flags ^ flags) & IFF_MULTICAST)
+		dev_change_rx_flags(dev, IFF_MULTICAST);
 
 	dev_set_rx_mode(dev);
 
@@ -3806,14 +3812,11 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
 static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
-	spin_lock(&net_todo_list_lock);
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	spin_unlock(&net_todo_list_lock);
 }
 
 static void rollback_registered(struct net_device *dev)
@@ -4140,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
  *	free_netdev(y1);
  *	free_netdev(y2);
  *
- * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * We are invoked by rtnl_unlock().
  * This allows us to deal with problems:
  * 1) We can delete sysfs objects which invoke hotplug
  *    without deadlocking with linkwatch via keventd.
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  *    safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
  */
-static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list;
 
-	/* Need to guard against multiple cpu's getting out of order. */
-	mutex_lock(&net_todo_run_mutex);
-
-	/* Not safe to do outside the semaphore.  We must not return
-	 * until all unregister events invoked by the local processor
-	 * have been completed (either by this todo run, or one on
-	 * another cpu).
-	 */
-	if (list_empty(&net_todo_list))
-		goto out;
-
 	/* Snapshot list, allow later requests */
-	spin_lock(&net_todo_list_lock);
 	list_replace_init(&net_todo_list, &list);
-	spin_unlock(&net_todo_list_lock);
+
+	__rtnl_unlock();
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
@@ -4198,9 +4192,6 @@ void netdev_run_todo(void)
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
-
-out:
-	mutex_unlock(&net_todo_run_mutex);
 }
 
 static struct net_device_stats *internal_stats(struct net_device *dev)
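Taken together, the todo-list changes replace two private locks with the RTNL itself: net_set_todo() is only ever called with the RTNL held, and netdev_run_todo() (invoked from rtnl_unlock(), per the updated comment) splices the pending list onto a private head while still locked, drops the lock with __rtnl_unlock(), and walks the private copy where it may sleep. Anything queued by other threads in the meantime lands on the fresh global list and is drained by the next unlock, which is how the new comment's "must not return until all unregister events added during the interval the lock was held have been completed" is kept. A runnable user-space sketch of the snapshot-then-unlock pattern, with a pthread mutex standing in for the RTNL and all names invented; build with cc -pthread:

	#include <pthread.h>
	#include <stdio.h>

	struct todo { struct todo *next; int id; };

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct todo *todo_list;		/* shared; guarded by big_lock */

	static void set_todo(struct todo *t)	/* caller holds big_lock */
	{
		t->next = todo_list;
		todo_list = t;
	}

	static void run_todo(void)		/* enters locked, returns unlocked */
	{
		/* Snapshot list, allow later requests */
		struct todo *list = todo_list;
		todo_list = NULL;

		pthread_mutex_unlock(&big_lock);	/* like __rtnl_unlock() */

		/* Safe to sleep here; new entries go on the fresh global list */
		for (; list; list = list->next)
			printf("processing todo %d\n", list->id);
	}

	int main(void)
	{
		struct todo a = { .id = 1 }, b = { .id = 2 };

		pthread_mutex_lock(&big_lock);	/* like rtnl_lock() */
		set_todo(&a);
		set_todo(&b);
		run_todo();			/* like rtnl_unlock() */
		return 0;
	}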