Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	136
1 file changed, 55 insertions(+), 81 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index c98052487e98..c95d664b2b42 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2145,30 +2145,42 @@ void __netif_schedule(struct Qdisc *q)
 }
 EXPORT_SYMBOL(__netif_schedule);
 
-void dev_kfree_skb_irq(struct sk_buff *skb)
+struct dev_kfree_skb_cb {
+	enum skb_free_reason reason;
+};
+
+static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 {
-	if (atomic_dec_and_test(&skb->users)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	return (struct dev_kfree_skb_cb *)skb->cb;
+}
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		skb->next = sd->completion_queue;
-		sd->completion_queue = skb;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+{
+	unsigned long flags;
+
+	if (likely(atomic_read(&skb->users) == 1)) {
+		smp_rmb();
+		atomic_set(&skb->users, 0);
+	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+		return;
 	}
+	get_kfree_skb_cb(skb)->reason = reason;
+	local_irq_save(flags);
+	skb->next = __this_cpu_read(softnet_data.completion_queue);
+	__this_cpu_write(softnet_data.completion_queue, skb);
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(dev_kfree_skb_irq);
+EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
-void dev_kfree_skb_any(struct sk_buff *skb)
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	if (in_irq() || irqs_disabled())
-		dev_kfree_skb_irq(skb);
+		__dev_kfree_skb_irq(skb, reason);
 	else
 		dev_kfree_skb(skb);
 }
-EXPORT_SYMBOL(dev_kfree_skb_any);
+EXPORT_SYMBOL(__dev_kfree_skb_any);
 
 
 /**
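Note: dev_kfree_skb_irq()/dev_kfree_skb_any() become the reason-carrying __dev_kfree_skb_irq()/__dev_kfree_skb_any(), which stash an enum skb_free_reason in skb->cb before chaining the skb onto the per-cpu completion queue; the atomic_read() == 1 fast path frees the common single-user skb without a locked decrement. The short wrappers most callers will use are not part of this file's diff; presumably they sit next to the enum in netdevice.h and reduce to something like:

	/* Sketch of the presumed header-side wrappers (not shown in this
	 * diff): "consumed" marks a normally delivered skb, "dropped"
	 * one freed on error, so tracing can tell the two apart.
	 */
	static inline void dev_kfree_skb_any(struct sk_buff *skb)
	{
		__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
	}

	static inline void dev_consume_skb_any(struct sk_buff *skb)
	{
		__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
	}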
@@ -2523,21 +2535,6 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_skb_features);
 
-/*
- * Returns true if either:
- *	1. skb has frag_list and the device doesn't support FRAGLIST, or
- *	2. skb is fragmented and the device does not support SG.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
-				      netdev_features_t features)
-{
-	return skb_is_nonlinear(skb) &&
-			((skb_has_frag_list(skb) &&
-				!(features & NETIF_F_FRAGLIST)) ||
-			(skb_shinfo(skb)->nr_frags &&
-				!(features & NETIF_F_SG)));
-}
-
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq, void *accel_priv)
 {
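Note: skb_needs_linearize() is removed from dev.c, but the check itself is still required on the transmit path, so the helper has presumably been relocated to a shared header rather than dropped outright. Its call site keeps the familiar shape, roughly:

	/* sketch of the unchanged caller, assuming the helper now
	 * lives in a shared header */
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;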
@@ -3306,7 +3303,10 @@ static void net_tx_action(struct softirq_action *h)
 			clist = clist->next;
 
 			WARN_ON(atomic_read(&skb->users));
-			trace_kfree_skb(skb, net_tx_action);
+			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+				trace_consume_skb(skb);
+			else
+				trace_kfree_skb(skb, net_tx_action);
 			__kfree_skb(skb);
 		}
 	}
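Note: with the reason recorded in skb->cb, net_tx_action() can emit trace_consume_skb() for ordinary TX completions and keep trace_kfree_skb(), which drop-monitoring tools watch, for genuine drops. A driver completion path would pick the reason explicitly; a hypothetical sketch (mydrv_clean_tx is not from this patch):

	/* hypothetical TX-completion handler choosing a free reason */
	static void mydrv_clean_tx(struct sk_buff *skb, bool xmit_ok)
	{
		if (xmit_ok)
			__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
		else
			__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
	}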
@@ -3752,7 +3752,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 		if (ptype->type != type || !ptype->callbacks.gro_complete)
 			continue;
 
-		err = ptype->callbacks.gro_complete(skb);
+		err = ptype->callbacks.gro_complete(skb, 0);
 		break;
 	}
 	rcu_read_unlock();
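Note: gro_complete() callbacks grow a second parameter, the offset of the header the callback should finalize; the top-level caller passes 0 for the outermost frame, which looks like groundwork for running the same callback on encapsulated headers at a non-zero offset. A hedged sketch of a callback under the new signature (myproto_gro_complete is hypothetical):

	/* hypothetical two-argument gro_complete callback; nhoff is
	 * the offset of this protocol's header within the skb */
	static int myproto_gro_complete(struct sk_buff *skb, int nhoff)
	{
		/* fix up the merged super-packet's header fields,
		 * located at skb->data + nhoff */
		return 0;
	}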
@@ -3818,6 +3818,23 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 	}
 }
 
+static void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	const struct skb_shared_info *pinfo = skb_shinfo(skb);
+	const skb_frag_t *frag0 = &pinfo->frags[0];
+
+	NAPI_GRO_CB(skb)->data_offset = 0;
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+
+	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+	    pinfo->nr_frags &&
+	    !PageHighMem(skb_frag_page(frag0))) {
+		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+	}
+}
+
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -3833,6 +3850,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
 		goto normal;
 
+	skb_gro_reset_offset(skb);
 	gro_list_prepare(napi, skb);
 
 	rcu_read_lock();
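Note: skb_gro_reset_offset() moves, verbatim, ahead of dev_gro_receive() and is now called there, so the GRO offsets and the frag0 fast path are initialized once inside the engine instead of by each entry point. frag0 points at the first page fragment when the linear area is empty (mac header == tail pointer), letting header accessors read straight from the fragment; the in-tree helper skb_gro_header_fast() relies on it and is essentially:

	/* sketch of the existing fast-path accessor this setup serves */
	static inline void *skb_gro_header_fast(struct sk_buff *skb,
						unsigned int offset)
	{
		return NAPI_GRO_CB(skb)->frag0 + offset;
	}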
@@ -3938,27 +3956,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 	return ret;
 }
 
-static void skb_gro_reset_offset(struct sk_buff *skb)
-{
-	const struct skb_shared_info *pinfo = skb_shinfo(skb);
-	const skb_frag_t *frag0 = &pinfo->frags[0];
-
-	NAPI_GRO_CB(skb)->data_offset = 0;
-	NAPI_GRO_CB(skb)->frag0 = NULL;
-	NAPI_GRO_CB(skb)->frag0_len = 0;
-
-	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
-	    pinfo->nr_frags &&
-	    !PageHighMem(skb_frag_page(frag0))) {
-		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
-	}
-}
-
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
-	skb_gro_reset_offset(skb);
-
 	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
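Note: napi_gro_receive() shrinks to a thin wrapper because the offset reset now happens inside dev_gro_receive(). The frags entry point benefits the same way, since it also funnels into dev_gro_receive(); roughly (a sketch, minus any tracing the tree may add):

	gro_result_t napi_gro_frags(struct napi_struct *napi)
	{
		struct sk_buff *skb = napi_frags_skb(napi);

		if (!skb)
			return GRO_DROP;

		return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	}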
@@ -3992,12 +3991,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 {
 	switch (ret) {
 	case GRO_NORMAL:
-	case GRO_HELD:
-		skb->protocol = eth_type_trans(skb, skb->dev);
-
-		if (ret == GRO_HELD)
-			skb_gro_pull(skb, -ETH_HLEN);
-		else if (netif_receive_skb(skb))
+		if (netif_receive_skb(skb))
 			ret = GRO_DROP;
 		break;
 
@@ -4006,6 +4000,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 		napi_reuse_skb(napi, skb);
 		break;
 
+	case GRO_HELD:
 	case GRO_MERGED:
 		break;
 	}
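Note: GRO_HELD joins the no-op cases. A held skb stays on the GRO list for later merging, and since the Ethernet header is now parsed up front in napi_frags_skb() (next hunk), there is no protocol or offset fix-up left to perform on it here.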
@@ -4016,36 +4011,15 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
 	struct sk_buff *skb = napi->skb;
-	struct ethhdr *eth;
-	unsigned int hlen;
-	unsigned int off;
 
 	napi->skb = NULL;
 
-	skb_reset_mac_header(skb);
-	skb_gro_reset_offset(skb);
-
-	off = skb_gro_offset(skb);
-	hlen = off + sizeof(*eth);
-	eth = skb_gro_header_fast(skb, off);
-	if (skb_gro_header_hard(skb, hlen)) {
-		eth = skb_gro_header_slow(skb, hlen, off);
-		if (unlikely(!eth)) {
-			napi_reuse_skb(napi, skb);
-			skb = NULL;
-			goto out;
-		}
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+		napi_reuse_skb(napi, skb);
+		return NULL;
 	}
+	skb->protocol = eth_type_trans(skb, skb->dev);
 
-	skb_gro_pull(skb, sizeof(*eth));
-
-	/*
-	 * This works because the only protocols we care about don't require
-	 * special handling.  We'll fix it up properly at the end.
-	 */
-	skb->protocol = eth->h_proto;
-
-out:
 	return skb;
 }
 
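Note: napi_frags_skb() no longer hand-parses the Ethernet header through the GRO fast/slow header accessors. A single pskb_may_pull() guarantees the header sits in the linear area, after which eth_type_trans() sets skb->dev and skb->protocol and advances past the header in one step, so the temporary "skb->protocol = eth->h_proto" shortcut and its apologetic comment go away.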