author     Herbert Xu <herbert@gondor.apana.org.au>   2013-11-21 14:10:04 -0500
committer  David S. Miller <davem@davemloft.net>      2013-11-21 14:11:50 -0500
commit     9d8506cc2d7ea1f911c72c100193a3677f6668c3
tree       33eec3f9e567cbf3141f7a3824559beb0a7cea3a
parent     91398a0992c8aa18eb7749060b75761ece5ddc57
gso: handle new frag_list of frags GRO packets
Recently GRO started generating packets with frag_lists of frags.
This was not handled by GSO, thus leading to a crash.

Thankfully these packets are of a regular form and are easy to
handle. This patch handles them in two ways. For completely
non-linear frag_list entries, we simply continue to iterate over
the frag_list frags once we exhaust the normal frags.

For frag_list entries with linear parts, we call pskb_trim on the
first part of the frag_list skb, and then process the rest of the
frags in the usual way.

This patch also kills a chunk of dead frag_list code that has
obviously never ever been run since it ends up generating a bogus
GSO-segmented packet with a frag_list entry.

Future work is planned to split super big packets into TSO ones.

Fixes: 8a29111c7ca6 ("net: gro: allow to build full sized skb")
Reported-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Reported-by: Jerry Chu <hkchu@google.com>
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Sander Eikelenboom <linux@eikelenboom.it>
Tested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
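To make the walk described above concrete, here is a minimal, hypothetical userspace sketch in plain C. struct pkt, struct frag and walk_payload() are made-up stand-ins for the relevant bits of struct sk_buff and struct skb_shared_info, not the kernel API; the sketch only shows the iteration order the patch relies on: consume the top-level frags, then continue through each frag_list entry, counting that entry's linear part before its frags.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins, NOT the kernel's sk_buff / skb_shared_info. */
struct frag {
        size_t size;
};

struct pkt {
        size_t headlen;        /* linear part; 0 for a frags-only entry */
        struct frag frags[4];
        unsigned int nr_frags;
        struct pkt *next;      /* stand-in for the frag_list chain */
};

/* Walk the head frags, then the frags of every frag_list entry. */
static size_t walk_payload(const struct pkt *skb)
{
        const struct frag *f = skb->frags;
        unsigned int i = 0, nfrags = skb->nr_frags;
        const struct pkt *fskb = skb->next;
        size_t total = skb->headlen;

        for (;;) {
                if (i >= nfrags) {             /* normal frags exhausted */
                        if (!fskb)
                                break;
                        total += fskb->headlen;    /* linear part of the entry first */
                        f = fskb->frags;           /* then continue in its frags */
                        nfrags = fskb->nr_frags;
                        i = 0;
                        fskb = fskb->next;
                        continue;
                }
                total += f[i++].size;
        }
        return total;
}

int main(void)
{
        /* One frags-only frag_list entry chained behind the head packet. */
        struct pkt tail = { .headlen = 0,  .frags = { { 1448 }, { 1448 } }, .nr_frags = 2 };
        struct pkt head = { .headlen = 66, .frags = { { 1448 } }, .nr_frags = 1, .next = &tail };

        printf("payload bytes: %zu\n", walk_payload(&head));
        return 0;
}

In skb_segment() itself this order is interleaved with the mss/offset bookkeeping visible in the diff below; the sketch deliberately leaves that out.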
Diffstat (limited to 'net')
-rw-r--r--  net/core/skbuff.c  75
1 file changed, 50 insertions, 25 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8cec1e6b844d..2718fed53d8c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2796,6 +2796,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
         struct sk_buff *segs = NULL;
         struct sk_buff *tail = NULL;
         struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
+        skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
         unsigned int mss = skb_shinfo(skb)->gso_size;
         unsigned int doffset = skb->data - skb_mac_header(skb);
         unsigned int offset = doffset;
@@ -2835,16 +2836,38 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                 if (hsize > len || !sg)
                         hsize = len;

-                if (!hsize && i >= nfrags) {
-                        BUG_ON(fskb->len != len);
+                if (!hsize && i >= nfrags && skb_headlen(fskb) &&
+                    (skb_headlen(fskb) == len || sg)) {
+                        BUG_ON(skb_headlen(fskb) > len);
+
+                        i = 0;
+                        nfrags = skb_shinfo(fskb)->nr_frags;
+                        skb_frag = skb_shinfo(fskb)->frags;
+                        pos += skb_headlen(fskb);
+
+                        while (pos < offset + len) {
+                                BUG_ON(i >= nfrags);
+
+                                size = skb_frag_size(skb_frag);
+                                if (pos + size > offset + len)
+                                        break;
+
+                                i++;
+                                pos += size;
+                                skb_frag++;
+                        }

-                        pos += len;
                         nskb = skb_clone(fskb, GFP_ATOMIC);
                         fskb = fskb->next;

                         if (unlikely(!nskb))
                                 goto err;

+                        if (unlikely(pskb_trim(nskb, len))) {
+                                kfree_skb(nskb);
+                                goto err;
+                        }
+
                         hsize = skb_end_offset(nskb);
                         if (skb_cow_head(nskb, doffset + headroom)) {
                                 kfree_skb(nskb);
@@ -2881,7 +2904,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                                                  nskb->data - tnl_hlen,
                                                  doffset + tnl_hlen);

-                if (fskb != skb_shinfo(skb)->frag_list)
+                if (nskb->len == len + doffset)
                         goto perform_csum_check;

                 if (!sg) {
@@ -2899,8 +2922,28 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)

                 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;

-                while (pos < offset + len && i < nfrags) {
-                        *frag = skb_shinfo(skb)->frags[i];
+                while (pos < offset + len) {
+                        if (i >= nfrags) {
+                                BUG_ON(skb_headlen(fskb));
+
+                                i = 0;
+                                nfrags = skb_shinfo(fskb)->nr_frags;
+                                skb_frag = skb_shinfo(fskb)->frags;
+
+                                BUG_ON(!nfrags);
+
+                                fskb = fskb->next;
+                        }
+
+                        if (unlikely(skb_shinfo(nskb)->nr_frags >=
+                                     MAX_SKB_FRAGS)) {
+                                net_warn_ratelimited(
+                                        "skb_segment: too many frags: %u %u\n",
+                                        pos, mss);
+                                goto err;
+                        }
+
+                        *frag = *skb_frag;
                         __skb_frag_ref(frag);
                         size = skb_frag_size(frag);
@@ -2913,6 +2956,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)

                         if (pos + size <= offset + len) {
                                 i++;
+                                skb_frag++;
                                 pos += size;
                         } else {
                                 skb_frag_size_sub(frag, pos + size - (offset + len));
@@ -2922,25 +2966,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                         frag++;
                 }

-                if (pos < offset + len) {
-                        struct sk_buff *fskb2 = fskb;
-
-                        BUG_ON(pos + fskb->len != offset + len);
-
-                        pos += fskb->len;
-                        fskb = fskb->next;
-
-                        if (fskb2->next) {
-                                fskb2 = skb_clone(fskb2, GFP_ATOMIC);
-                                if (!fskb2)
-                                        goto err;
-                        } else
-                                skb_get(fskb2);
-
-                        SKB_FRAG_ASSERT(nskb);
-                        skb_shinfo(nskb)->frag_list = fskb2;
-                }
-
 skip_fraglist:
                 nskb->data_len = len - hsize;
                 nskb->len += nskb->data_len;