Diffstat (limited to 'net/core/skbuff.c'):
 net/core/skbuff.c | 526 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 440 insertions(+), 86 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 06e72d3cdf60..869c7afe3b07 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -47,6 +47,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
@@ -65,6 +67,7 @@
 #include <net/dst.h>
 #include <net/sock.h>
 #include <net/checksum.h>
+#include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 
 #include <asm/uaccess.h>
@@ -74,36 +77,6 @@
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
-				  struct pipe_buffer *buf)
-{
-	put_page(buf->page);
-}
-
-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
-			      struct pipe_buffer *buf)
-{
-	get_page(buf->page);
-}
-
-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-			       struct pipe_buffer *buf)
-{
-	return 1;
-}
-
-
-/* Pipe buffer operations for a socket. */
-static const struct pipe_buf_operations sock_pipe_buf_ops = {
-	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
-	.confirm = generic_pipe_buf_confirm,
-	.release = sock_pipe_buf_release,
-	.steal = sock_pipe_buf_steal,
-	.get = sock_pipe_buf_get,
-};
-
 /**
  * skb_panic - private function for out-of-line support
  * @skb: buffer
@@ -712,9 +685,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->inner_network_header = old->inner_network_header;
 	new->inner_mac_header = old->inner_mac_header;
 	skb_dst_copy(new, old);
-	new->rxhash = old->rxhash;
+	skb_copy_hash(new, old);
 	new->ooo_okay = old->ooo_okay;
-	new->l4_rxhash = old->l4_rxhash;
 	new->no_fcs = old->no_fcs;
 	new->encapsulation = old->encapsulation;
 #ifdef CONFIG_XFRM
@@ -735,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mark = old->mark;
 	new->skb_iif = old->skb_iif;
 	__nf_copy(new, old);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
-	new->nf_trace = old->nf_trace;
-#endif
 #ifdef CONFIG_NET_SCHED
 	new->tc_index = old->tc_index;
 #ifdef CONFIG_NET_CLS_ACT
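
Note: skb_copy_hash() replaces the two open-coded hash assignments above, and the nf_trace copy is dropped here presumably because __nf_copy(), already called just before, now handles it. A minimal sketch of the helper, assuming the include/linux/skbuff.h definition of this era (rxhash/l4_rxhash fields, before the later rename to skb->hash):

/* Hypothetical reconstruction of the inline helper this hunk switches to;
 * see include/linux/skbuff.h for the authoritative definition.
 */
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->rxhash = from->rxhash;
	to->l4_rxhash = from->l4_rxhash;
}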
@@ -1830,7 +1799,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		.partial = partial,
 		.nr_pages_max = MAX_SKB_FRAGS,
 		.flags = flags,
-		.ops = &sock_pipe_buf_ops,
+		.ops = &nosteal_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
 	struct sk_buff *frag_iter;
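
The local sock_pipe_buf_ops removed earlier only differed from generic pipe-buffer behavior in refusing ->steal(), which is why it can be replaced by a shared table. A sketch of nosteal_pipe_buf_ops, with the member list assumed from fs/splice.c of the same kernel generation:

/* Assumed shape of the generic replacement in fs/splice.c: identical
 * semantics to the removed sock_pipe_buf_ops -- plain page get/put and a
 * ->steal() that always fails.
 */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_nosteal,
	.get = generic_pipe_buf_get,
};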
@@ -2122,6 +2091,91 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 }
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
+/**
+ * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
+ * @from: source buffer
+ *
+ * Calculates the amount of linear headroom needed in the 'to' skb passed
+ * into skb_zerocopy().
+ */
+unsigned int
+skb_zerocopy_headlen(const struct sk_buff *from)
+{
+	unsigned int hlen = 0;
+
+	if (!from->head_frag ||
+	    skb_headlen(from) < L1_CACHE_BYTES ||
+	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+		hlen = skb_headlen(from);
+
+	if (skb_has_frag_list(from))
+		hlen = from->len;
+
+	return hlen;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
+
+/**
+ * skb_zerocopy - Zero copy skb to skb
+ * @to: destination buffer
+ * @from: source buffer
+ * @len: number of bytes to copy from source buffer
+ * @hlen: size of linear headroom in destination buffer
+ *
+ * Copies up to `len` bytes from `from` to `to` by creating references
+ * to the frags in the source buffer.
+ *
+ * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
+ * headroom in the `to` buffer.
+ */
+void
+skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+	int i, j = 0;
+	int plen = 0; /* length of skb->head fragment */
+	struct page *page;
+	unsigned int offset;
+
+	BUG_ON(!from->head_frag && !hlen);
+
+	/* dont bother with small payloads */
+	if (len <= skb_tailroom(to)) {
+		skb_copy_bits(from, 0, skb_put(to, len), len);
+		return;
+	}
+
+	if (hlen) {
+		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+		len -= hlen;
+	} else {
+		plen = min_t(int, skb_headlen(from), len);
+		if (plen) {
+			page = virt_to_head_page(from->head);
+			offset = from->data - (unsigned char *)page_address(page);
+			__skb_fill_page_desc(to, 0, page, offset, plen);
+			get_page(page);
+			j = 1;
+			len -= plen;
+		}
+	}
+
+	to->truesize += len + plen;
+	to->len += len + plen;
+	to->data_len += len + plen;
+
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+		if (!len)
+			break;
+		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+		len -= skb_shinfo(to)->frags[j].size;
+		skb_frag_ref(to, j);
+		j++;
+	}
+	skb_shinfo(to)->nr_frags = j;
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy);
+
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
 	__wsum csum;
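
The two helpers added above are meant to be used together: skb_zerocopy_headlen() sizes the linear part of the destination, and skb_zerocopy() then copies the head and takes page references for the rest. A hedged usage sketch (clone_payload() is a hypothetical caller; allocation size and error handling are illustrative, not from this patch):

/* Sketch: build a partial zero-copy duplicate of 'from' covering 'len'
 * bytes, sharing the source's page frags instead of copying them.
 */
static struct sk_buff *clone_payload(struct sk_buff *from, unsigned int len)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to;

	to = alloc_skb(hlen, GFP_ATOMIC);
	if (!to)
		return NULL;

	/* Copies hlen linear bytes, then references the remaining
	 * (len - hlen) bytes as shared page frags.
	 */
	skb_zerocopy(to, from, len, hlen);
	return to;
}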
@@ -2784,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
 /**
  * skb_segment - Perform protocol segmentation on skb.
- * @skb: buffer to segment
+ * @head_skb: buffer to segment
  * @features: features for the output path (see dev->features)
  *
  * This function performs segmentation on the given skb. It returns
  * a pointer to the first in a list of new skbs for the segments.
  * In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *skb_segment(struct sk_buff *head_skb,
+			    netdev_features_t features)
 {
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
-	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
-	skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
-	unsigned int mss = skb_shinfo(skb)->gso_size;
-	unsigned int doffset = skb->data - skb_mac_header(skb);
+	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+	unsigned int mss = skb_shinfo(head_skb)->gso_size;
+	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+	struct sk_buff *frag_skb = head_skb;
 	unsigned int offset = doffset;
-	unsigned int tnl_hlen = skb_tnl_header_len(skb);
+	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
 	unsigned int headroom;
 	unsigned int len;
 	__be16 proto;
 	bool csum;
 	int sg = !!(features & NETIF_F_SG);
-	int nfrags = skb_shinfo(skb)->nr_frags;
+	int nfrags = skb_shinfo(head_skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
 	int pos;
 
-	proto = skb_network_protocol(skb);
+	proto = skb_network_protocol(head_skb);
 	if (unlikely(!proto))
 		return ERR_PTR(-EINVAL);
 
 	csum = !!can_checksum_protocol(features, proto);
-	__skb_push(skb, doffset);
-	headroom = skb_headroom(skb);
-	pos = skb_headlen(skb);
+	__skb_push(head_skb, doffset);
+	headroom = skb_headroom(head_skb);
+	pos = skb_headlen(head_skb);
 
 	do {
 		struct sk_buff *nskb;
-		skb_frag_t *frag;
+		skb_frag_t *nskb_frag;
 		int hsize;
 		int size;
 
-		len = skb->len - offset;
+		len = head_skb->len - offset;
 		if (len > mss)
 			len = mss;
 
-		hsize = skb_headlen(skb) - offset;
+		hsize = skb_headlen(head_skb) - offset;
 		if (hsize < 0)
 			hsize = 0;
 		if (hsize > len || !sg)
 			hsize = len;
 
-		if (!hsize && i >= nfrags && skb_headlen(fskb) &&
-		    (skb_headlen(fskb) == len || sg)) {
-			BUG_ON(skb_headlen(fskb) > len);
+		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
+		    (skb_headlen(list_skb) == len || sg)) {
+			BUG_ON(skb_headlen(list_skb) > len);
 
 			i = 0;
-			nfrags = skb_shinfo(fskb)->nr_frags;
-			skb_frag = skb_shinfo(fskb)->frags;
-			pos += skb_headlen(fskb);
+			nfrags = skb_shinfo(list_skb)->nr_frags;
+			frag = skb_shinfo(list_skb)->frags;
+			frag_skb = list_skb;
+			pos += skb_headlen(list_skb);
 
 			while (pos < offset + len) {
 				BUG_ON(i >= nfrags);
 
-				size = skb_frag_size(skb_frag);
+				size = skb_frag_size(frag);
 				if (pos + size > offset + len)
 					break;
 
 				i++;
 				pos += size;
-				skb_frag++;
+				frag++;
 			}
 
-			nskb = skb_clone(fskb, GFP_ATOMIC);
-			fskb = fskb->next;
+			nskb = skb_clone(list_skb, GFP_ATOMIC);
+			list_skb = list_skb->next;
 
 			if (unlikely(!nskb))
 				goto err;
@@ -2879,7 +2936,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 			__skb_push(nskb, doffset);
 		} else {
 			nskb = __alloc_skb(hsize + doffset + headroom,
-					   GFP_ATOMIC, skb_alloc_rx_flag(skb),
+					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
 					   NUMA_NO_NODE);
 
 			if (unlikely(!nskb))
@@ -2895,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 			segs = nskb;
 		tail = nskb;
 
-		__copy_skb_header(nskb, skb);
-		nskb->mac_len = skb->mac_len;
+		__copy_skb_header(nskb, head_skb);
+		nskb->mac_len = head_skb->mac_len;
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
-		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
 						 doffset + tnl_hlen);
 
@@ -2909,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 
 		if (!sg) {
 			nskb->ip_summed = CHECKSUM_NONE;
-			nskb->csum = skb_copy_and_csum_bits(skb, offset,
+			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
 							    skb_put(nskb, len),
 							    len, 0);
 			continue;
 		}
 
-		frag = skb_shinfo(nskb)->frags;
+		nskb_frag = skb_shinfo(nskb)->frags;
 
-		skb_copy_from_linear_data_offset(skb, offset,
+		skb_copy_from_linear_data_offset(head_skb, offset,
 						 skb_put(nskb, hsize), hsize);
 
-		skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
+					     SKBTX_SHARED_FRAG;
 
 		while (pos < offset + len) {
 			if (i >= nfrags) {
-				BUG_ON(skb_headlen(fskb));
+				BUG_ON(skb_headlen(list_skb));
 
 				i = 0;
-				nfrags = skb_shinfo(fskb)->nr_frags;
-				skb_frag = skb_shinfo(fskb)->frags;
+				nfrags = skb_shinfo(list_skb)->nr_frags;
+				frag = skb_shinfo(list_skb)->frags;
+				frag_skb = list_skb;
 
 				BUG_ON(!nfrags);
 
-				fskb = fskb->next;
+				list_skb = list_skb->next;
 			}
 
 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
@@ -2943,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 				goto err;
 			}
 
-			*frag = *skb_frag;
-			__skb_frag_ref(frag);
-			size = skb_frag_size(frag);
+			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
+				goto err;
+
+			*nskb_frag = *frag;
+			__skb_frag_ref(nskb_frag);
+			size = skb_frag_size(nskb_frag);
 
 			if (pos < offset) {
-				frag->page_offset += offset - pos;
-				skb_frag_size_sub(frag, offset - pos);
+				nskb_frag->page_offset += offset - pos;
+				skb_frag_size_sub(nskb_frag, offset - pos);
 			}
 
 			skb_shinfo(nskb)->nr_frags++;
 
 			if (pos + size <= offset + len) {
 				i++;
-				skb_frag++;
+				frag++;
 				pos += size;
 			} else {
-				skb_frag_size_sub(frag, pos + size - (offset + len));
+				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
 				goto skip_fraglist;
 			}
 
-			frag++;
+			nskb_frag++;
 		}
 
 skip_fraglist:
@@ -2977,15 +3039,12 @@ perform_csum_check:
 						  nskb->len - doffset, 0);
 			nskb->ip_summed = CHECKSUM_NONE;
 		}
-	} while ((offset += len) < skb->len);
+	} while ((offset += len) < head_skb->len);
 
 	return segs;
 
 err:
-	while ((skb = segs)) {
-		segs = skb->next;
-		kfree_skb(skb);
-	}
+	kfree_skb_list(segs);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(skb_segment);
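
As before this series, skb_segment() returns a singly linked list of segments chained via ->next and leaves the original skb to the caller; on failure, the new kfree_skb_list() path frees any partially built list in one call. A hedged consumption sketch (the real entry point is skb_gso_segment() on the transmit path; xmit_gso() below is hypothetical):

/* Sketch: segment a GSO skb and hand each resulting segment on, freeing
 * the original afterwards. The transmission call is illustrative only.
 */
static int xmit_gso(struct net_device *dev, struct sk_buff *skb,
		    netdev_features_t features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		dev->netdev_ops->ndo_start_xmit(nskb, dev); /* illustrative */
	}
	consume_skb(skb); /* skb_segment() does not free the input skb */
	return 0;
}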
@@ -3468,6 +3527,278 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 }
 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
 
+static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+			       unsigned int max)
+{
+	if (skb_headlen(skb) >= len)
+		return 0;
+
+	/* If we need to pullup then pullup to the max, so we
+	 * won't need to do it again.
+	 */
+	if (max > skb->len)
+		max = skb->len;
+
+	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+		return -ENOMEM;
+
+	if (skb_headlen(skb) < len)
+		return -EPROTO;
+
+	return 0;
+}
+
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
+static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
+{
+	unsigned int off;
+	bool fragment;
+	int err;
+
+	fragment = false;
+
+	err = skb_maybe_pull_tail(skb,
+				  sizeof(struct iphdr),
+				  MAX_IP_HDR_LEN);
+	if (err < 0)
+		goto out;
+
+	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+		fragment = true;
+
+	off = ip_hdrlen(skb);
+
+	err = -EPROTO;
+
+	if (fragment)
+		goto out;
+
+	switch (ip_hdr(skb)->protocol) {
+	case IPPROTO_TCP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct tcphdr),
+					  MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			tcp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_TCP, 0);
+		break;
+	case IPPROTO_UDP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct udphdr),
+					  MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			udp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_UDP, 0);
+		break;
+	default:
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
+
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
+static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
+{
+	int err;
+	u8 nexthdr;
+	unsigned int off;
+	unsigned int len;
+	bool fragment;
+	bool done;
+
+	fragment = false;
+	done = false;
+
+	off = sizeof(struct ipv6hdr);
+
+	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+	if (err < 0)
+		goto out;
+
+	nexthdr = ipv6_hdr(skb)->nexthdr;
+
+	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+	while (off <= len && !done) {
+		switch (nexthdr) {
+		case IPPROTO_DSTOPTS:
+		case IPPROTO_HOPOPTS:
+		case IPPROTO_ROUTING: {
+			struct ipv6_opt_hdr *hp;
+
+			err = skb_maybe_pull_tail(skb,
+						  off +
+						  sizeof(struct ipv6_opt_hdr),
+						  MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+			nexthdr = hp->nexthdr;
+			off += ipv6_optlen(hp);
+			break;
+		}
+		case IPPROTO_AH: {
+			struct ip_auth_hdr *hp;
+
+			err = skb_maybe_pull_tail(skb,
+						  off +
+						  sizeof(struct ip_auth_hdr),
+						  MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
+			nexthdr = hp->nexthdr;
+			off += ipv6_authlen(hp);
+			break;
+		}
+		case IPPROTO_FRAGMENT: {
+			struct frag_hdr *hp;
+
+			err = skb_maybe_pull_tail(skb,
+						  off +
+						  sizeof(struct frag_hdr),
+						  MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct frag_hdr, skb, off);
+
+			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+				fragment = true;
+
+			nexthdr = hp->nexthdr;
+			off += sizeof(struct frag_hdr);
+			break;
+		}
+		default:
+			done = true;
+			break;
+		}
+	}
+
+	err = -EPROTO;
+
+	if (!done || fragment)
+		goto out;
+
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct tcphdr),
+					  MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_TCP, 0);
+		break;
+	case IPPROTO_UDP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct udphdr),
+					  MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			udp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_UDP, 0);
+		break;
+	default:
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
+
+/**
+ * skb_checksum_setup - set up partial checksum offset
+ * @skb: the skb to set up
+ * @recalculate: if true the pseudo-header checksum will be recalculated
+ */
+int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
+{
+	int err;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		err = skb_checksum_setup_ip(skb, recalculate);
+		break;
+
+	case htons(ETH_P_IPV6):
+		err = skb_checksum_setup_ipv6(skb, recalculate);
+		break;
+
+	default:
+		err = -EPROTO;
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(skb_checksum_setup);
+
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
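
skb_checksum_setup() targets virtual-device receive paths where a guest hands over CHECKSUM_PARTIAL packets without trustworthy csum_start/csum_offset: it re-derives both from the TCP/UDP headers and returns -EPROTO for anything it cannot parse, including fragments. A hedged caller sketch (fixup_guest_csum() is hypothetical; the real users are drivers of the xen-netback variety):

/* Sketch: validate/rebuild checksum-offload metadata on a packet received
 * from an untrusted guest before injecting it into the stack.
 */
static int fixup_guest_csum(struct sk_buff *skb)
{
	/* recalculate=true also rewrites the pseudo-header checksum so a
	 * device or software fallback can finish the sum correctly.
	 */
	int err = skb_checksum_setup(skb, true);

	if (err)
		return err; /* -EPROTO: non-IP, fragmented, or unparseable */
	return 0;
}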
@@ -3592,3 +3923,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	nf_reset_trace(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int hdr_len;
+
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		hdr_len = tcp_hdrlen(skb);
+	else
+		hdr_len = sizeof(struct udphdr);
+	return hdr_len + shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
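
A worked example of the helper added above: for a TCP GSO skb with gso_size = 1448 and a 20-byte TCP header (no options), each segment carries 20 + 1448 = 1468 bytes at the transport layer; for UDP offload the fixed 8-byte UDP header is used instead. A minimal hypothetical caller:

/* Sketch: check whether each segment of a GSO skb stays under a
 * transport-level byte limit. seg_fits() is illustrative, not kernel API.
 */
static bool seg_fits(const struct sk_buff *skb, unsigned int limit)
{
	return skb_gso_transport_seglen(skb) <= limit;
}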