Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r-- | include/linux/skbuff.h | 172 |
1 file changed, 163 insertions, 9 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 08074a810164..ec89301ada41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -338,13 +338,18 @@ enum { | |||
338 | 338 | ||
339 | SKB_GSO_GRE = 1 << 6, | 339 | SKB_GSO_GRE = 1 << 6, |
340 | 340 | ||
341 | SKB_GSO_IPIP = 1 << 7, | 341 | SKB_GSO_GRE_CSUM = 1 << 7, |
342 | 342 | ||
343 | SKB_GSO_SIT = 1 << 8, | 343 | SKB_GSO_IPIP = 1 << 8, |
344 | 344 | ||
345 | SKB_GSO_UDP_TUNNEL = 1 << 9, | 345 | SKB_GSO_SIT = 1 << 9, |
346 | |||
347 | SKB_GSO_UDP_TUNNEL = 1 << 10, | ||
348 | |||
349 | SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, | ||
350 | |||
351 | SKB_GSO_MPLS = 1 << 12, | ||
346 | 352 | ||
347 | SKB_GSO_MPLS = 1 << 10, | ||
348 | }; | 353 | }; |
349 | 354 | ||
350 | #if BITS_PER_LONG > 32 | 355 | #if BITS_PER_LONG > 32 |
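The two new *_CSUM variants mark tunnel GSO packets whose outer checksum still has to be computed per segment. A minimal sketch of how an offload path could test for them is below; tunnel_needs_outer_csum() is an invented name, not a kernel helper.

#include <linux/skbuff.h>

/* Illustrative only: true if this GSO skb carries an outer GRE or UDP
 * tunnel header whose checksum must be filled in for every segment.
 */
static bool tunnel_needs_outer_csum(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type &
	       (SKB_GSO_GRE_CSUM | SKB_GSO_UDP_TUNNEL_CSUM);
}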
@@ -426,7 +431,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, | |||
426 | * @csum_start: Offset from skb->head where checksumming should start | 431 | * @csum_start: Offset from skb->head where checksumming should start |
427 | * @csum_offset: Offset from csum_start where checksum should be stored | 432 | * @csum_offset: Offset from csum_start where checksum should be stored |
428 | * @priority: Packet queueing priority | 433 | * @priority: Packet queueing priority |
429 | * @local_df: allow local fragmentation | 434 | * @ignore_df: allow local fragmentation |
430 | * @cloned: Head may be cloned (check refcnt to be sure) | 435 | * @cloned: Head may be cloned (check refcnt to be sure) |
431 | * @ip_summed: Driver fed us an IP checksum | 436 | * @ip_summed: Driver fed us an IP checksum |
432 | * @nohdr: Payload reference only, must not modify header | 437 | * @nohdr: Payload reference only, must not modify header |
@@ -514,7 +519,7 @@ struct sk_buff { | |||
514 | }; | 519 | }; |
515 | __u32 priority; | 520 | __u32 priority; |
516 | kmemcheck_bitfield_begin(flags1); | 521 | kmemcheck_bitfield_begin(flags1); |
517 | __u8 local_df:1, | 522 | __u8 ignore_df:1, |
518 | cloned:1, | 523 | cloned:1, |
519 | ip_summed:2, | 524 | ip_summed:2, |
520 | nohdr:1, | 525 | nohdr:1, |
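The ignore_df rename keeps the old local_df meaning (the stack may fragment this packet locally even when DF handling would normally forbid it) but states it from the skb's point of view. A hedged sketch of a transmit path setting the renamed bit; example_prepare_outer_skb() is an invented name.

#include <linux/skbuff.h>

/* Illustrative only: allow local fragmentation of an outer (encapsulating)
 * packet regardless of how the inner frame handles DF.
 */
static void example_prepare_outer_skb(struct sk_buff *skb)
{
	skb->ignore_df = 1;
}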
@@ -567,7 +572,10 @@ struct sk_buff { | |||
567 | * headers if needed | 572 | * headers if needed |
568 | */ | 573 | */ |
569 | __u8 encapsulation:1; | 574 | __u8 encapsulation:1; |
570 | /* 6/8 bit hole (depending on ndisc_nodetype presence) */ | 575 | __u8 encap_hdr_csum:1; |
576 | __u8 csum_valid:1; | ||
577 | __u8 csum_complete_sw:1; | ||
578 | /* 3/5 bit hole (depending on ndisc_nodetype presence) */ | ||
571 | kmemcheck_bitfield_end(flags2); | 579 | kmemcheck_bitfield_end(flags2); |
572 | 580 | ||
573 | #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL | 581 | #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL |
@@ -739,7 +747,13 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); | |||
739 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); | 747 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); |
740 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); | 748 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); |
741 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); | 749 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); |
742 | struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask); | 750 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, |
751 | gfp_t gfp_mask, bool fclone); | ||
752 | static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, | ||
753 | gfp_t gfp_mask) | ||
754 | { | ||
755 | return __pskb_copy_fclone(skb, headroom, gfp_mask, false); | ||
756 | } | ||
743 | 757 | ||
744 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); | 758 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); |
745 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, | 759 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, |
@@ -1840,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) | |||
1840 | return pskb_may_pull(skb, skb_network_offset(skb) + len); | 1854 | return pskb_may_pull(skb, skb_network_offset(skb) + len); |
1841 | } | 1855 | } |
1842 | 1856 | ||
1857 | static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb) | ||
1858 | { | ||
1859 | /* Only continue with checksum unnecessary if device indicated | ||
1860 | * it is valid across encapsulation (skb->encapsulation was set). | ||
1861 | */ | ||
1862 | if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation) | ||
1863 | skb->ip_summed = CHECKSUM_NONE; | ||
1864 | |||
1865 | skb->encapsulation = 0; | ||
1866 | skb->csum_valid = 0; | ||
1867 | } | ||
1868 | |||
1843 | /* | 1869 | /* |
1844 | * CPUs often take a performance hit when accessing unaligned memory | 1870 | * CPUs often take a performance hit when accessing unaligned memory |
1845 | * locations. The actual performance hit varies, it can be small if the | 1871 | * locations. The actual performance hit varies, it can be small if the |
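A hedged sketch of how a tunnel receive path might use skb_pop_rcv_encapsulation() once the outer header has been verified; example_tunnel_rcv() and hdrlen are illustrative, not taken from any in-tree driver.

#include <linux/skbuff.h>

/* Illustrative only: strip the outer headers (length already validated by
 * the caller) and drop the encapsulation checksum state before handing the
 * inner packet up the stack.
 */
static void example_tunnel_rcv(struct sk_buff *skb, unsigned int hdrlen)
{
	__skb_pull(skb, hdrlen);
	skb_reset_network_header(skb);

	/* CHECKSUM_UNNECESSARY survives only if the device validated it
	 * across the encapsulation (skb->encapsulation was set); otherwise
	 * it is downgraded to CHECKSUM_NONE and csum_valid is cleared.
	 */
	skb_pop_rcv_encapsulation(skb);

	/* ... deliver the inner packet ... */
}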
@@ -2233,6 +2259,14 @@ static inline struct sk_buff *pskb_copy(struct sk_buff *skb, | |||
2233 | return __pskb_copy(skb, skb_headroom(skb), gfp_mask); | 2259 | return __pskb_copy(skb, skb_headroom(skb), gfp_mask); |
2234 | } | 2260 | } |
2235 | 2261 | ||
2262 | |||
2263 | static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, | ||
2264 | gfp_t gfp_mask) | ||
2265 | { | ||
2266 | return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); | ||
2267 | } | ||
2268 | |||
2269 | |||
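A usage sketch for the fclone-aware copy: when the caller knows the fresh copy will be cloned immediately, allocating it from the fclone cache lets the subsequent skb_clone() reuse the companion skb instead of a separate allocation. example_copy_and_clone() is an invented name for illustration.

#include <linux/skbuff.h>

static struct sk_buff *example_copy_and_clone(struct sk_buff *orig)
{
	struct sk_buff *copy, *clone;

	/* Ask for an fclone skb because a clone follows right away. */
	copy = pskb_copy_for_clone(orig, GFP_ATOMIC);
	if (!copy)
		return NULL;

	clone = skb_clone(copy, GFP_ATOMIC);
	if (!clone) {
		kfree_skb(copy);
		return NULL;
	}

	/* ... queue 'clone' for one consumer, return 'copy' to the other ... */
	return copy;
}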
2236 | /** | 2270 | /** |
2237 | * skb_clone_writable - is the header of a clone writable | 2271 | * skb_clone_writable - is the header of a clone writable |
2238 | * @skb: buffer to check | 2272 | * @skb: buffer to check |
@@ -2716,7 +2750,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb); | |||
2716 | 2750 | ||
2717 | static inline int skb_csum_unnecessary(const struct sk_buff *skb) | 2751 | static inline int skb_csum_unnecessary(const struct sk_buff *skb) |
2718 | { | 2752 | { |
2719 | return skb->ip_summed & CHECKSUM_UNNECESSARY; | 2753 | return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid); |
2720 | } | 2754 | } |
2721 | 2755 | ||
2722 | /** | 2756 | /** |
@@ -2741,6 +2775,103 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb) | |||
2741 | 0 : __skb_checksum_complete(skb); | 2775 | 0 : __skb_checksum_complete(skb); |
2742 | } | 2776 | } |
2743 | 2777 | ||
2778 | /* Check if we need to perform checksum complete validation. | ||
2779 | * | ||
2780 | * Returns true if checksum complete is needed, false otherwise | ||
2781 | * (either checksum is unnecessary or zero checksum is allowed). | ||
2782 | */ | ||
2783 | static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, | ||
2784 | bool zero_okay, | ||
2785 | __sum16 check) | ||
2786 | { | ||
2787 | if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { | ||
2788 | skb->csum_valid = 1; | ||
2789 | return false; | ||
2790 | } | ||
2791 | |||
2792 | return true; | ||
2793 | } | ||
2794 | |||
2795 | /* For small packets <= CHECKSUM_BREAK perform checksum complete directly | ||
2796 | * in checksum_init. | ||
2797 | */ | ||
2798 | #define CHECKSUM_BREAK 76 | ||
2799 | |||
2800 | /* Validate (init) checksum based on checksum complete. | ||
2801 | * | ||
2802 | * Return values: | ||
2803 | * 0: checksum is validated or checksum complete is attempted. In the latter | ||
2804 | * case ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo | ||
2805 | * checksum is stored in skb->csum for use in __skb_checksum_complete. | ||
2806 | * non-zero: value of invalid checksum | ||
2807 | * | ||
2808 | */ | ||
2809 | static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, | ||
2810 | bool complete, | ||
2811 | __wsum psum) | ||
2812 | { | ||
2813 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
2814 | if (!csum_fold(csum_add(psum, skb->csum))) { | ||
2815 | skb->csum_valid = 1; | ||
2816 | return 0; | ||
2817 | } | ||
2818 | } | ||
2819 | |||
2820 | skb->csum = psum; | ||
2821 | |||
2822 | if (complete || skb->len <= CHECKSUM_BREAK) { | ||
2823 | __sum16 csum; | ||
2824 | |||
2825 | csum = __skb_checksum_complete(skb); | ||
2826 | skb->csum_valid = !csum; | ||
2827 | return csum; | ||
2828 | } | ||
2829 | |||
2830 | return 0; | ||
2831 | } | ||
2832 | |||
2833 | static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) | ||
2834 | { | ||
2835 | return 0; | ||
2836 | } | ||
2837 | |||
2838 | /* Perform checksum validate (init). Note that this is a macro since we only | ||
2839 | * want to call the compute_pseudo input function only when it is necessary. | ||
2840 | * First we try to validate without any computation (checksum unnecessary) and | ||
2841 | * then validate based on checksum complete, calling the function to compute | ||
2842 | * the pseudo header. | ||
2843 | * | ||
2844 | * Return values: | ||
2845 | * 0: checksum is validated or try to in skb_checksum_complete | ||
2846 | * non-zero: value of invalid checksum | ||
2847 | */ | ||
2848 | #define __skb_checksum_validate(skb, proto, complete, \ | ||
2849 | zero_okay, check, compute_pseudo) \ | ||
2850 | ({ \ | ||
2851 | __sum16 __ret = 0; \ | ||
2852 | skb->csum_valid = 0; \ | ||
2853 | if (__skb_checksum_validate_needed(skb, zero_okay, check)) \ | ||
2854 | __ret = __skb_checksum_validate_complete(skb, \ | ||
2855 | complete, compute_pseudo(skb, proto)); \ | ||
2856 | __ret; \ | ||
2857 | }) | ||
2858 | |||
2859 | #define skb_checksum_init(skb, proto, compute_pseudo) \ | ||
2860 | __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo) | ||
2861 | |||
2862 | #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ | ||
2863 | __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo) | ||
2864 | |||
2865 | #define skb_checksum_validate(skb, proto, compute_pseudo) \ | ||
2866 | __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo) | ||
2867 | |||
2868 | #define skb_checksum_validate_zero_check(skb, proto, check, \ | ||
2869 | compute_pseudo) \ | ||
2870 | __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo) | ||
2871 | |||
2872 | #define skb_checksum_simple_validate(skb) \ | ||
2873 | __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) | ||
2874 | |||
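As a usage sketch (not the in-tree TCP/UDP code): an IPv4 transport receive path built on these helpers could look like the following, where ipv4_pseudo() and example_rcv() are invented names and error handling is reduced to a bare drop.

#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Pseudo-header checksum over the IPv4 addresses, length and protocol,
 * in the shape __skb_checksum_validate() expects from compute_pseudo.
 */
static __wsum ipv4_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = ip_hdr(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0);
}

static int example_rcv(struct sk_buff *skb)
{
	/* Either the checksum is already known good (csum_valid gets set),
	 * or the pseudo-header sum is stashed in skb->csum for a later
	 * __skb_checksum_complete(); a non-zero return means it is bad.
	 */
	if (skb_checksum_init(skb, IPPROTO_TCP, ipv4_pseudo))
		return -EINVAL;		/* drop: invalid checksum */

	/* ... continue with the protocol's receive processing ... */
	return 0;
}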
2744 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 2875 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
2745 | void nf_conntrack_destroy(struct nf_conntrack *nfct); | 2876 | void nf_conntrack_destroy(struct nf_conntrack *nfct); |
2746 | static inline void nf_conntrack_put(struct nf_conntrack *nfct) | 2877 | static inline void nf_conntrack_put(struct nf_conntrack *nfct) |
@@ -2895,6 +3026,7 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb) | |||
2895 | struct skb_gso_cb { | 3026 | struct skb_gso_cb { |
2896 | int mac_offset; | 3027 | int mac_offset; |
2897 | int encap_level; | 3028 | int encap_level; |
3029 | __u16 csum_start; | ||
2898 | }; | 3030 | }; |
2899 | #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) | 3031 | #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) |
2900 | 3032 | ||
@@ -2919,6 +3051,28 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) | |||
2919 | return 0; | 3051 | return 0; |
2920 | } | 3052 | } |
2921 | 3053 | ||
3054 | /* Compute the checksum for a gso segment. First compute the checksum value | ||
3055 | * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and | ||
3056 | * then add in skb->csum (checksum from csum_start to end of packet). | ||
3057 | * skb->csum and csum_start are then updated to reflect the checksum of the | ||
3058 | * resultant packet starting from the transport header; the resultant checksum | ||
3059 | * is in the res argument (i.e. normally zero or the ~ of the checksum of a | ||
3060 | * pseudo header). | ||
3061 | */ | ||
3062 | static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) | ||
3063 | { | ||
3064 | int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - | ||
3065 | skb_transport_offset(skb); | ||
3066 | __u16 csum; | ||
3067 | |||
3068 | csum = csum_fold(csum_partial(skb_transport_header(skb), | ||
3069 | plen, skb->csum)); | ||
3070 | skb->csum = res; | ||
3071 | SKB_GSO_CB(skb)->csum_start -= plen; | ||
3072 | |||
3073 | return csum; | ||
3074 | } | ||
3075 | |||
2922 | static inline bool skb_is_gso(const struct sk_buff *skb) | 3076 | static inline bool skb_is_gso(const struct sk_buff *skb) |
2923 | { | 3077 | { |
2924 | return skb_shinfo(skb)->gso_size; | 3078 | return skb_shinfo(skb)->gso_size; |
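For reference, a hedged sketch of how a segmentation routine for an outer protocol without a pseudo-header (GRE-like) might consume gso_make_checksum(); example_fill_tunnel_csum() and csum_field are invented for illustration. A UDP-style caller would pass the complement of its pseudo-header checksum as res instead of 0.

#include <linux/skbuff.h>

/* Illustrative only: fill in the outer checksum field of one GSO segment.
 * skb->csum already holds the checksum from csum_start to the end of the
 * segment, so gso_make_checksum() only has to fold in the outer header
 * bytes; the field itself must be zero while that sum is taken.
 */
static void example_fill_tunnel_csum(struct sk_buff *seg, __sum16 *csum_field)
{
	*csum_field = 0;
	*csum_field = gso_make_checksum(seg, 0);
}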