aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c2
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--include/linux/netdevice.h26
-rw-r--r--include/linux/skbuff.h76
-rw-r--r--net/core/dev.c24
-rw-r--r--net/ipv4/gre_demux.c1
-rw-r--r--net/ipv4/gre_offload.c7
-rw-r--r--net/ipv4/udp_offload.c5
-rw-r--r--net/sctp/input.c8
13 files changed, 101 insertions, 64 deletions
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 42e9cea52bdc..435d060f630b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1683,7 +1683,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1683 if (netdev->features & NETIF_F_RXHASH) 1683 if (netdev->features & NETIF_F_RXHASH)
1684 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); 1684 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1685 1685
1686 skb->encapsulation = rxcp->tunneled; 1686 skb->csum_level = rxcp->tunneled;
1687 skb_mark_napi_id(skb, napi); 1687 skb_mark_napi_id(skb, napi);
1688 1688
1689 if (rxcp->vlanf) 1689 if (rxcp->vlanf)
@@ -1741,7 +1741,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1741 if (adapter->netdev->features & NETIF_F_RXHASH) 1741 if (adapter->netdev->features & NETIF_F_RXHASH)
1742 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); 1742 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1743 1743
1744 skb->encapsulation = rxcp->tunneled; 1744 skb->csum_level = rxcp->tunneled;
1745 skb_mark_napi_id(skb, napi); 1745 skb_mark_napi_id(skb, napi);
1746 1746
1747 if (rxcp->vlanf) 1747 if (rxcp->vlanf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4bf49d2acb04..b60f16381229 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1241,7 +1241,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1241 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && 1241 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1242 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); 1242 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1243 1243
1244 skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
1245 skb->ip_summed = CHECKSUM_NONE; 1244 skb->ip_summed = CHECKSUM_NONE;
1246 1245
1247 /* Rx csum enabled and ip headers found? */ 1246 /* Rx csum enabled and ip headers found? */
@@ -1315,6 +1314,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1315 } 1314 }
1316 1315
1317 skb->ip_summed = CHECKSUM_UNNECESSARY; 1316 skb->ip_summed = CHECKSUM_UNNECESSARY;
1317 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1318 1318
1319 return; 1319 return;
1320 1320
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 64b089151add..50cf5b8d0e15 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -746,7 +746,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
746 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && 746 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
747 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); 747 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
748 748
749 skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
750 skb->ip_summed = CHECKSUM_NONE; 749 skb->ip_summed = CHECKSUM_NONE;
751 750
752 /* Rx csum enabled and ip headers found? */ 751 /* Rx csum enabled and ip headers found? */
@@ -820,6 +819,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
820 } 819 }
821 820
822 skb->ip_summed = CHECKSUM_UNNECESSARY; 821 skb->ip_summed = CHECKSUM_UNNECESSARY;
822 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
823 823
824 return; 824 return;
825 825
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c909d23f14c..b7da466a20d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -769,7 +769,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
769 gro_skb->ip_summed = CHECKSUM_UNNECESSARY; 769 gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
770 770
771 if (l2_tunnel) 771 if (l2_tunnel)
772 gro_skb->encapsulation = 1; 772 gro_skb->csum_level = 1;
773 if ((cqe->vlan_my_qpn & 773 if ((cqe->vlan_my_qpn &
774 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && 774 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
775 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 775 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -823,8 +823,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
823 skb->protocol = eth_type_trans(skb, dev); 823 skb->protocol = eth_type_trans(skb, dev);
824 skb_record_rx_queue(skb, cq->ring); 824 skb_record_rx_queue(skb, cq->ring);
825 825
826 if (l2_tunnel) 826 if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
827 skb->encapsulation = 1; 827 skb->csum_level = 1;
828 828
829 if (dev->features & NETIF_F_RXHASH) 829 if (dev->features & NETIF_F_RXHASH)
830 skb_set_hash(skb, 830 skb_set_hash(skb,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index e45bf09af0c9..18e5de72e9b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1753,7 +1753,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1753 1753
1754 if (qlcnic_encap_length(sts_data[1]) && 1754 if (qlcnic_encap_length(sts_data[1]) &&
1755 skb->ip_summed == CHECKSUM_UNNECESSARY) { 1755 skb->ip_summed == CHECKSUM_UNNECESSARY) {
1756 skb->encapsulation = 1; 1756 skb->csum_level = 1;
1757 adapter->stats.encap_rx_csummed++; 1757 adapter->stats.encap_rx_csummed++;
1758 } 1758 }
1759 1759
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index beb377b2d4b7..67527f3d3be2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,8 +1158,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1158 if (!vs) 1158 if (!vs)
1159 goto drop; 1159 goto drop;
1160 1160
1161 skb_pop_rcv_encapsulation(skb);
1162
1163 vs->rcv(vs, skb, vxh->vx_vni); 1161 vs->rcv(vs, skb, vxh->vx_vni);
1164 return 0; 1162 return 0;
1165 1163
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dfc1d8b8bd0f..456eb1fe51e8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1883,8 +1883,8 @@ struct napi_gro_cb {
1883 /* GRO checksum is valid */ 1883 /* GRO checksum is valid */
1884 u8 csum_valid:1; 1884 u8 csum_valid:1;
1885 1885
1886 /* Number encapsulation layers crossed */ 1886 /* Number of checksums via CHECKSUM_UNNECESSARY */
1887 u8 encapsulation; 1887 u8 csum_cnt:3;
1888 1888
1889 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 1889 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
1890 __wsum csum; 1890 __wsum csum;
@@ -2179,8 +2179,7 @@ static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2179 __sum16 check) 2179 __sum16 check)
2180{ 2180{
2181 return (skb->ip_summed != CHECKSUM_PARTIAL && 2181 return (skb->ip_summed != CHECKSUM_PARTIAL &&
2182 (skb->ip_summed != CHECKSUM_UNNECESSARY || 2182 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2183 (NAPI_GRO_CB(skb)->encapsulation > skb->encapsulation)) &&
2184 (!zero_okay || check)); 2183 (!zero_okay || check));
2185} 2184}
2186 2185
@@ -2196,18 +2195,17 @@ static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2196 return __skb_gro_checksum_complete(skb); 2195 return __skb_gro_checksum_complete(skb);
2197} 2196}
2198 2197
2199/* Update skb for CHECKSUM_UNNECESSARY when we verified a top level
2200 * checksum or an encapsulated one during GRO. This saves work
2201 * if we fallback to normal path with the packet.
2202 */
2203static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) 2198static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2204{ 2199{
2205 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 2200 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2206 if (NAPI_GRO_CB(skb)->encapsulation) 2201 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2207 skb->encapsulation = 1; 2202 NAPI_GRO_CB(skb)->csum_cnt--;
2208 } else if (skb->ip_summed != CHECKSUM_PARTIAL) { 2203 } else {
2209 skb->ip_summed = CHECKSUM_UNNECESSARY; 2204 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2210 skb->encapsulation = 0; 2205 * verified a new top level checksum or an encapsulated one
2206 * during GRO. This saves work if we fallback to normal path.
2207 */
2208 __skb_incr_checksum_unnecessary(skb);
2211 } 2209 }
2212} 2210}
2213 2211
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b69b7b512c06..c93b5859a772 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -47,11 +47,29 @@
47 * 47 *
48 * The hardware you're dealing with doesn't calculate the full checksum 48 * The hardware you're dealing with doesn't calculate the full checksum
49 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums 49 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
50 * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will 50 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
51 * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still 51 * if their checksums are okay. skb->csum is still undefined in this case
52 * undefined in this case though. It is a bad option, but, unfortunately, 52 * though. It is a bad option, but, unfortunately, nowadays most vendors do
53 * nowadays most vendors do this. Apparently with the secret goal to sell 53 * this. Apparently with the secret goal to sell you new devices, when you
54 * you new devices, when you will add new protocol to your host, f.e. IPv6 8) 54 * will add new protocol to your host, f.e. IPv6 8)
55 *
56 * CHECKSUM_UNNECESSARY is applicable to following protocols:
57 * TCP: IPv6 and IPv4.
58 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
59 * zero UDP checksum for either IPv4 or IPv6, the networking stack
60 * may perform further validation in this case.
61 * GRE: only if the checksum is present in the header.
62 * SCTP: indicates the CRC in SCTP header has been validated.
63 *
64 * skb->csum_level indicates the number of consecutive checksums found in
65 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
66 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
67 * and a device is able to verify the checksums for UDP (possibly zero),
68 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
69 * two. If the device were only able to verify the UDP checksum and not
70 * GRE, either because it doesn't support GRE checksum or because GRE
71 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
72 * not considered in this case).
55 * 73 *
56 * CHECKSUM_COMPLETE: 74 * CHECKSUM_COMPLETE:
57 * 75 *
@@ -112,6 +130,9 @@
112#define CHECKSUM_COMPLETE 2 130#define CHECKSUM_COMPLETE 2
113#define CHECKSUM_PARTIAL 3 131#define CHECKSUM_PARTIAL 3
114 132
133/* Maximum value in skb->csum_level */
134#define SKB_MAX_CSUM_LEVEL 3
135
115#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) 136#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
116#define SKB_WITH_OVERHEAD(X) \ 137#define SKB_WITH_OVERHEAD(X) \
117 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 138 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -571,11 +592,7 @@ struct sk_buff {
571 __u8 wifi_acked:1; 592 __u8 wifi_acked:1;
572 __u8 no_fcs:1; 593 __u8 no_fcs:1;
573 __u8 head_frag:1; 594 __u8 head_frag:1;
574 /* Encapsulation protocol and NIC drivers should use 595 /* Indicates the inner headers are valid in the skbuff. */
575 * this flag to indicate to each other if the skb contains
576 * encapsulated packet or not and maybe use the inner packet
577 * headers if needed
578 */
579 __u8 encapsulation:1; 596 __u8 encapsulation:1;
580 __u8 encap_hdr_csum:1; 597 __u8 encap_hdr_csum:1;
581 __u8 csum_valid:1; 598 __u8 csum_valid:1;
@@ -598,6 +615,11 @@ struct sk_buff {
598 __u32 reserved_tailroom; 615 __u32 reserved_tailroom;
599 }; 616 };
600 617
618 kmemcheck_bitfield_begin(flags3);
619 __u8 csum_level:2;
620 /* 14 bit hole */
621 kmemcheck_bitfield_end(flags3);
622
601 __be16 inner_protocol; 623 __be16 inner_protocol;
602 __u16 inner_transport_header; 624 __u16 inner_transport_header;
603 __u16 inner_network_header; 625 __u16 inner_network_header;
@@ -1862,18 +1884,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1862 return pskb_may_pull(skb, skb_network_offset(skb) + len); 1884 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1863} 1885}
1864 1886
1865static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
1866{
1867 /* Only continue with checksum unnecessary if device indicated
1868 * it is valid across encapsulation (skb->encapsulation was set).
1869 */
1870 if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
1871 skb->ip_summed = CHECKSUM_NONE;
1872
1873 skb->encapsulation = 0;
1874 skb->csum_valid = 0;
1875}
1876
1877/* 1887/*
1878 * CPUs often take a performance hit when accessing unaligned memory 1888 * CPUs often take a performance hit when accessing unaligned memory
1879 * locations. The actual performance hit varies, it can be small if the 1889 * locations. The actual performance hit varies, it can be small if the
@@ -2794,6 +2804,27 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2794 0 : __skb_checksum_complete(skb); 2804 0 : __skb_checksum_complete(skb);
2795} 2805}
2796 2806
2807static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
2808{
2809 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2810 if (skb->csum_level == 0)
2811 skb->ip_summed = CHECKSUM_NONE;
2812 else
2813 skb->csum_level--;
2814 }
2815}
2816
2817static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
2818{
2819 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2820 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
2821 skb->csum_level++;
2822 } else if (skb->ip_summed == CHECKSUM_NONE) {
2823 skb->ip_summed = CHECKSUM_UNNECESSARY;
2824 skb->csum_level = 0;
2825 }
2826}
2827
2797/* Check if we need to perform checksum complete validation. 2828/* Check if we need to perform checksum complete validation.
2798 * 2829 *
2799 * Returns true if checksum complete is needed, false otherwise 2830 * Returns true if checksum complete is needed, false otherwise
@@ -2805,6 +2836,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
2805{ 2836{
2806 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { 2837 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
2807 skb->csum_valid = 1; 2838 skb->csum_valid = 1;
2839 __skb_decr_checksum_unnecessary(skb);
2808 return false; 2840 return false;
2809 } 2841 }
2810 2842
diff --git a/net/core/dev.c b/net/core/dev.c
index 26d296c2447c..a6077ef56345 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3962,13 +3962,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3962 3962
3963 gro_list_prepare(napi, skb); 3963 gro_list_prepare(napi, skb);
3964 3964
3965 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3966 NAPI_GRO_CB(skb)->csum = skb->csum;
3967 NAPI_GRO_CB(skb)->csum_valid = 1;
3968 } else {
3969 NAPI_GRO_CB(skb)->csum_valid = 0;
3970 }
3971
3972 rcu_read_lock(); 3965 rcu_read_lock();
3973 list_for_each_entry_rcu(ptype, head, list) { 3966 list_for_each_entry_rcu(ptype, head, list) {
3974 if (ptype->type != type || !ptype->callbacks.gro_receive) 3967 if (ptype->type != type || !ptype->callbacks.gro_receive)
@@ -3980,7 +3973,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3980 NAPI_GRO_CB(skb)->flush = 0; 3973 NAPI_GRO_CB(skb)->flush = 0;
3981 NAPI_GRO_CB(skb)->free = 0; 3974 NAPI_GRO_CB(skb)->free = 0;
3982 NAPI_GRO_CB(skb)->udp_mark = 0; 3975 NAPI_GRO_CB(skb)->udp_mark = 0;
3983 NAPI_GRO_CB(skb)->encapsulation = 0; 3976
3977 /* Setup for GRO checksum validation */
3978 switch (skb->ip_summed) {
3979 case CHECKSUM_COMPLETE:
3980 NAPI_GRO_CB(skb)->csum = skb->csum;
3981 NAPI_GRO_CB(skb)->csum_valid = 1;
3982 NAPI_GRO_CB(skb)->csum_cnt = 0;
3983 break;
3984 case CHECKSUM_UNNECESSARY:
3985 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
3986 NAPI_GRO_CB(skb)->csum_valid = 0;
3987 break;
3988 default:
3989 NAPI_GRO_CB(skb)->csum_cnt = 0;
3990 NAPI_GRO_CB(skb)->csum_valid = 0;
3991 }
3984 3992
3985 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); 3993 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3986 break; 3994 break;
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 7c1a8ff974dd..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -125,7 +125,6 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
125 *csum_err = true; 125 *csum_err = true;
126 return -EINVAL; 126 return -EINVAL;
127 } 127 }
128 skb_pop_rcv_encapsulation(skb);
129 options++; 128 options++;
130 } 129 }
131 130
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index d1bd16937d93..a4d7965fb880 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -172,12 +172,9 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
172 } 172 }
173 173
174 /* Don't bother verifying checksum if we're going to flush anyway. */ 174 /* Don't bother verifying checksum if we're going to flush anyway. */
175 if (greh->flags & GRE_CSUM) { 175 if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush &&
176 if (!NAPI_GRO_CB(skb)->flush && 176 skb_gro_checksum_simple_validate(skb))
177 skb_gro_checksum_simple_validate(skb))
178 goto out_unlock; 177 goto out_unlock;
179 NAPI_GRO_CB(skb)->encapsulation++;
180 }
181 178
182 flush = 0; 179 flush = 0;
183 180
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 8ed460e3753c..a6adff98382a 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -238,12 +238,13 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
238 int flush = 1; 238 int flush = 1;
239 239
240 if (NAPI_GRO_CB(skb)->udp_mark || 240 if (NAPI_GRO_CB(skb)->udp_mark ||
241 (!skb->encapsulation && !NAPI_GRO_CB(skb)->csum_valid)) 241 (skb->ip_summed != CHECKSUM_PARTIAL &&
242 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
243 !NAPI_GRO_CB(skb)->csum_valid))
242 goto out; 244 goto out;
243 245
244 /* mark that this skb passed once through the udp gro layer */ 246 /* mark that this skb passed once through the udp gro layer */
245 NAPI_GRO_CB(skb)->udp_mark = 1; 247 NAPI_GRO_CB(skb)->udp_mark = 1;
246 NAPI_GRO_CB(skb)->encapsulation++;
247 248
248 rcu_read_lock(); 249 rcu_read_lock();
249 uo_priv = rcu_dereference(udp_offload_base); 250 uo_priv = rcu_dereference(udp_offload_base);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c1b991294516..b6493b3f11a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -133,9 +133,13 @@ int sctp_rcv(struct sk_buff *skb)
133 __skb_pull(skb, skb_transport_offset(skb)); 133 __skb_pull(skb, skb_transport_offset(skb));
134 if (skb->len < sizeof(struct sctphdr)) 134 if (skb->len < sizeof(struct sctphdr))
135 goto discard_it; 135 goto discard_it;
136 if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) && 136
137 sctp_rcv_checksum(net, skb) < 0) 137 skb->csum_valid = 0; /* Previous value not applicable */
138 if (skb_csum_unnecessary(skb))
139 __skb_decr_checksum_unnecessary(skb);
140 else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0)
138 goto discard_it; 141 goto discard_it;
142 skb->csum_valid = 1;
139 143
140 skb_pull(skb, sizeof(struct sctphdr)); 144 skb_pull(skb, sizeof(struct sctphdr));
141 145