 drivers/net/vxlan.c    |  2 --
 include/linux/skbuff.h | 74 ++++++++++++++++++++++++++++++++++++-----------------------
 net/ipv4/gre_demux.c   |  1 -
 3 files changed, 51 insertions(+), 26 deletions(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index beb377b2d4b7..67527f3d3be2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,8 +1158,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	if (!vs)
 		goto drop;
 
-	skb_pop_rcv_encapsulation(skb);
-
 	vs->rcv(vs, skb, vxh->vx_vni);
 	return 0;
 
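
The call removed above is no longer needed because the checksum-unnecessary accounting becomes generic with this patch: validating the outer UDP checksum in the normal receive path now consumes one csum_level through __skb_decr_checksum_unnecessary() (see the skbuff.h hunks below). The following is a worked example of the intended accounting, under the assumption that the NIC reports its validated checksums through csum_level as documented in the new comment:

/* Assumed device behaviour for a VXLAN frame IPv4->UDP->VXLAN->...->IPv4->TCP
 * where the NIC verified both the outer UDP and the inner TCP checksums:
 *
 *   as delivered by the driver: ip_summed == CHECKSUM_UNNECESSARY, csum_level == 1
 *   outer UDP validated:        __skb_decr_checksum_unnecessary() -> csum_level == 0
 *   inner TCP validated:        __skb_decr_checksum_unnecessary() -> CHECKSUM_NONE
 *
 * If the NIC only verified the outer UDP checksum (csum_level == 0 on
 * receive), the first decrement already drops the skb to CHECKSUM_NONE and
 * the inner TCP checksum is verified in software rather than wrongly trusted.
 */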
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3c9574c80933..c93b5859a772 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -47,11 +47,29 @@
  *
  *   The hardware you're dealing with doesn't calculate the full checksum
  *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- *   for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will
- *   set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
- *   undefined in this case though. It is a bad option, but, unfortunately,
- *   nowadays most vendors do this. Apparently with the secret goal to sell
- *   you new devices, when you will add new protocol to your host, f.e. IPv6 8)
+ *   for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
+ *   if their checksums are okay. skb->csum is still undefined in this case
+ *   though. It is a bad option, but, unfortunately, nowadays most vendors do
+ *   this. Apparently with the secret goal to sell you new devices, when you
+ *   will add new protocol to your host, f.e. IPv6 8)
+ *
+ *   CHECKSUM_UNNECESSARY is applicable to the following protocols:
+ *     TCP: IPv6 and IPv4.
+ *     UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ *       zero UDP checksum for either IPv4 or IPv6, the networking stack
+ *       may perform further validation in this case.
+ *     GRE: only if the checksum is present in the header.
+ *     SCTP: indicates the CRC in SCTP header has been validated.
+ *
+ *   skb->csum_level indicates the number of consecutive checksums found in
+ *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
+ *   and a device is able to verify the checksums for UDP (possibly zero),
+ *   GRE (checksum flag is set), and TCP -- skb->csum_level would be set to
+ *   two. If the device were only able to verify the UDP checksum and not
+ *   GRE, either because it doesn't support GRE checksum or because the GRE
+ *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
+ *   not considered in this case).
  *
  * CHECKSUM_COMPLETE:
  *
@@ -112,6 +130,9 @@
 #define CHECKSUM_COMPLETE	2
 #define CHECKSUM_PARTIAL	3
 
+/* Maximum value in skb->csum_level */
+#define SKB_MAX_CSUM_LEVEL	3
+
 #define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -571,11 +592,7 @@ struct sk_buff {
 	__u8			wifi_acked:1;
 	__u8			no_fcs:1;
 	__u8			head_frag:1;
-	/* Encapsulation protocol and NIC drivers should use
-	 * this flag to indicate to each other if the skb contains
-	 * encapsulated packet or not and maybe use the inner packet
-	 * headers if needed
-	 */
+	/* Indicates the inner headers are valid in the skbuff. */
 	__u8			encapsulation:1;
 	__u8			encap_hdr_csum:1;
 	__u8			csum_valid:1;
@@ -599,7 +616,8 @@ struct sk_buff {
 	};
 
 	kmemcheck_bitfield_begin(flags3);
-	/* 16 bit hole */
+	__u8			csum_level:2;
+	/* 14 bit hole */
 	kmemcheck_bitfield_end(flags3);
 
 	__be16			inner_protocol;
@@ -1866,18 +1884,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
 }
 
-static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
-{
-	/* Only continue with checksum unnecessary if device indicated
-	 * it is valid across encapsulation (skb->encapsulation was set).
-	 */
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	skb->encapsulation = 0;
-	skb->csum_valid = 0;
-}
-
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
@@ -2798,6 +2804,27 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 		       0 : __skb_checksum_complete(skb);
 }
 
+static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		if (skb->csum_level == 0)
+			skb->ip_summed = CHECKSUM_NONE;
+		else
+			skb->csum_level--;
+	}
+}
+
+static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+			skb->csum_level++;
+	} else if (skb->ip_summed == CHECKSUM_NONE) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = 0;
+	}
+}
+
 /* Check if we need to perform checksum complete validation.
  *
  * Returns true if checksum complete is needed, false otherwise
@@ -2809,6 +2836,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
 {
 	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
 		skb->csum_valid = 1;
+		__skb_decr_checksum_unnecessary(skb);
 		return false;
 	}
 
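
To make the csum_level semantics from the comment above concrete, here is a minimal, hypothetical sketch of the producer side (the helper name and the nverified parameter are illustrative, not part of this patch): a driver that knows how many consecutive checksums the hardware verified can express that with the new __skb_incr_checksum_unnecessary() helper.

#include <linux/skbuff.h>

/* Hypothetical receive-completion helper: nverified is how many consecutive
 * checksums the hardware reported as good for this frame.
 */
static void example_rx_set_csum(struct sk_buff *skb, unsigned int nverified)
{
	unsigned int i;

	skb->ip_summed = CHECKSUM_NONE;

	/* The first call flips CHECKSUM_NONE to CHECKSUM_UNNECESSARY with
	 * csum_level 0; each later call bumps csum_level, capped at
	 * SKB_MAX_CSUM_LEVEL, so n validated checksums end up as level n - 1.
	 */
	for (i = 0; i < nverified; i++)
		__skb_incr_checksum_unnecessary(skb);
}

For the IPv6->UDP->GRE->IPv4->TCP example in the comment, nverified == 3 (UDP, GRE and TCP) leaves csum_level == 2, while a device that could only verify the UDP checksum leaves csum_level == 0.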
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 7c1a8ff974dd..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -125,7 +125,6 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 			*csum_err = true;
 			return -EINVAL;
 		}
-		skb_pop_rcv_encapsulation(skb);
 		options++;
 	}
 
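
On the consumer side, each layer that would otherwise verify a checksum in software now spends one level instead of tunnel code calling the removed skb_pop_rcv_encapsulation(). A hedged sketch of that pattern, modelled on the __skb_checksum_validate_needed() change above (the function below is illustrative only, not part of this patch):

#include <linux/skbuff.h>

/* Illustrative per-layer check: if the device already vouched for this
 * layer's checksum, consume one CHECKSUM_UNNECESSARY level and skip the
 * software validation; otherwise the caller must checksum the packet.
 */
static bool example_layer_needs_sw_csum(struct sk_buff *skb)
{
	if (skb_csum_unnecessary(skb)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;	/* hardware covered this layer */
	}
	return true;		/* verify in software */
}

Once the outer UDP (VXLAN) and GRE layers have each consumed a level this way, the inner TCP checksum is only trusted if the device really did verify that many checksums, which is what the single encapsulation bit cleared by skb_pop_rcv_encapsulation() could only approximate.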