Diffstat (limited to 'net/core/dev.c')
 -rw-r--r-- net/core/dev.c | 32
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..367a586d0c8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+					 struct net_device *dev,
+					 struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
 	if (dev->flags & IFF_UP) {
-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
+		struct netdev_notifier_change_info change_info;
+
+		change_info.flags_changed = 0;
+		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+					      &change_info.info);
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
 	}
 }
@@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 	skb->vlan_tci = 0;
 	skb->dev = napi->dev;
 	skb->skb_iif = 0;
+	skb->encapsulation = 0;
+	skb_shinfo(skb)->gso_type = 0;
 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
 	napi->skb = skb;
@@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
 	napi->weight = weight_p;
 	local_irq_disable();
-	while (work < quota) {
+	while (1) {
 		struct sk_buff *skb;
-		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			local_irq_enable();
@@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		}
 
 		rps_lock(sd);
-		qlen = skb_queue_len(&sd->input_pkt_queue);
-		if (qlen)
-			skb_queue_splice_tail_init(&sd->input_pkt_queue,
-						   &sd->process_queue);
-
-		if (qlen < quota - work) {
+		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
 			 * only current cpu owns and manipulates this napi,
-			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-			 * we can use a plain write instead of clear_bit(),
+			 * and NAPI_STATE_SCHED is the only possible flag set
+			 * on backlog.
+			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
 			list_del(&napi->poll_list);
 			napi->state = 0;
+			rps_unlock(sd);
 
-			quota = work + qlen;
+			break;
 		}
+
+		skb_queue_splice_tail_init(&sd->input_pkt_queue,
+					   &sd->process_queue);
 		rps_unlock(sd);
 	}
 	local_irq_enable();