commit:    638d3c63811e31b2745f7fdd568b38c8abcffe03
tree:      606426ab4de84e59c5f50e1e3cce6e24819d45af
parent:    74fe61f17e999a458d5f64ca2aa9a0282ca32198
parent:    f760b87f8f12eb262f14603e65042996fe03720e
author:    David S. Miller <davem@davemloft.net>  2015-07-13 20:28:09 -0400
committer: David S. Miller <davem@davemloft.net>  2015-07-13 20:28:09 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/bridge/br_mdb.c

Minor conflict in br_mdb.c: in 'net' we added a memset of the on-stack 'ip'
variable, whereas in 'net-next' we assign the new member 'vid'.

Signed-off-by: David S. Miller <davem@davemloft.net>
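For readers untangling the same conflict: the resolution keeps both sides, zeroing the on-stack br_ip before assigning the new member. A sketch of the merged block, assuming the names quoted above ('ip', 'vid'); 'entry' and the trailing 'proto' line are illustrative, and the surrounding code in br_mdb.c may differ:

	struct br_ip ip;

	memset(&ip, 0, sizeof(ip));	/* from 'net': clear stack garbage */
	ip.vid = entry->vid;		/* from 'net-next': new 'vid' member */
	ip.proto = entry->addr.proto;	/* illustrative neighbouring field */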
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e0d270143fc7..69445a33ace6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
 		return dev->netdev_ops->ndo_get_iflink(dev);
 
-	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
-	if (dev->rtnl_link_ops)
-		return 0;
-
 	return dev->ifindex;
 }
 EXPORT_SYMBOL(dev_get_iflink);
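The hunk above reverts an earlier change that made dev_get_iflink() return 0 for any device with rtnl_link_ops set; a virtual device without an explicit lower link once again reports its own ifindex, so iflink == ifindex keeps meaning "no lower device" to userspace. Assembled from the hunk, the function now reads:

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);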
@@ -3452,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);
 
 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
@@ -3473,6 +3471,7 @@ enqueue:
 		goto enqueue;
 	}
 
+drop:
 	sd->dropped++;
 	rps_unlock(sd);
 
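Together, the two hunks above make enqueue_to_backlog() refuse packets for a device that is not running, e.g. one mid-unregistration. The check sits under rps_lock(), and the new drop: label reuses the existing drop accounting. Condensed from the hunks (elisions mine):

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;		/* device is down or going away */
	qlen = skb_queue_len(&sd->input_pkt_queue);
	/* ... room in the backlog: enqueue and return NET_RX_SUCCESS ... */
drop:
	sd->dropped++;
	rps_unlock(sd);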
@@ -3775,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -3786,7 +3783,7 @@ another_round:
 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = skb_vlan_untag(skb);
 		if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3813,10 @@ skip_taps:
 	if (static_key_false(&ingress_needed)) {
 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
-			goto unlock;
+			goto out;
 
 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
-			goto unlock;
+			goto out;
 	}
 #endif
 #ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3834,7 @@ ncls:
 		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3846,7 @@ ncls:
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
 			ret = NET_RX_SUCCESS;
-			goto unlock;
+			goto out;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3903,8 +3900,7 @@ drop:
 		ret = NET_RX_DROP;
 	}
 
-unlock:
-	rcu_read_unlock();
+out:
 	return ret;
 }
 
@@ -3935,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
+	int ret;
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
 	}
 #endif
-	return __netif_receive_skb(skb);
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 
 /**
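The __netif_receive_skb_core() hunks above (the rcu_read_lock()/rcu_read_unlock() pair removed, the unlock: label renamed to out:) move RCU read-side locking out of the core receive routine and into its callers. Here, all of netif_receive_skb_internal() now runs under a single rcu_read_lock(), and both exit paths unlock before returning. Condensed from the hunk:

	rcu_read_lock();
	/* RPS may hand the skb to another CPU's backlog ... */
	if (cpu >= 0) {
		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
		rcu_read_unlock();
		return ret;
	}
	/* ... otherwise receive it right here */
	ret = __netif_receive_skb(skb);	/* still inside the RCU section */
	rcu_read_unlock();
	return ret;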
@@ -4502,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		struct sk_buff *skb;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
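process_backlog() calls __netif_receive_skb() directly, bypassing netif_receive_skb_internal(), so after the change above it supplies its own RCU read-side section. The lock is taken while interrupts are still disabled and held across the whole receive call; annotated (comments mine):

	while ((skb = __skb_dequeue(&sd->process_queue))) {
		rcu_read_lock();	/* pairs with the lock removed from
					 * __netif_receive_skb_core() */
		local_irq_enable();
		__netif_receive_skb(skb);
		rcu_read_unlock();
		local_irq_disable();
		input_queue_head_incr(sd);
		/* ... quota bookkeeping ... */
	}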
@@ -6139,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}
 
 	synchronize_net();
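By this point in rollback_registered_many() the devices have already been closed (dev_close_many() runs earlier in the function), so netif_running() is false and the new enqueue_to_backlog() check keeps fresh packets out; flushing every CPU's backlog here, before synchronize_net(), drains what was already queued. The matching removal from netdev_run_todo(), where the flush used to happen much later, is the final hunk below. on_each_cpu(fn, info, 1) runs fn(info) on every online CPU and waits for completion, so the ordering is:

	dev->reg_state = NETREG_UNREGISTERING;	/* device is already down */
	on_each_cpu(flush_backlog, dev, 1);	/* drain per-CPU backlogs, wait */
	/* ... */
	synchronize_net();			/* readers see the final state */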
@@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 	struct netdev_queue *tx;
 	size_t sz = count * sizeof(*tx);
 
-	BUG_ON(count < 1 || count > 0xffff);
+	if (count < 1 || count > 0xffff)
+		return -EINVAL;
 
 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 	if (!tx) {
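Returning -EINVAL instead of hitting a BUG_ON() turns a bogus queue count into a recoverable registration failure rather than a kernel crash; the function already returns an errno (the allocation fallback below can fail), so callers need no change. An illustrative caller-side check (the label name is hypothetical; the real caller in alloc_netdev_mqs() may differ):

	ret = netif_alloc_netdev_queues(dev);
	if (ret)		/* now also covers count < 1 || count > 0xffff */
		goto free_all;	/* hypothetical unwind label */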
@@ -6773,8 +6774,6 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */