author     Herbert Xu <herbert@gondor.apana.org.au>    2006-07-13 22:26:39 -0400
committer  David S. Miller <davem@davemloft.net>       2006-07-13 22:26:39 -0400
commit     27b437c8b7d519aac70a0254c2e04c29eff565a2
tree       2391668d15da8a33fabd46ddf09594baa3c4b27e /net
parent     ab6cf0d0cb96417ef65cc2c2120c0e879edf7a4a
[NET]: Update frag_list in pskb_trim
When pskb_trim has to defer to ___pskb_trim to trim the frag_list part of
the packet, the frag_list is not updated to reflect the trimming. This
will usually work fine until you hit something that uses the packet length
or tail from the frag_list.
Examples include esp_output and ip_fragment.
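For context, the deferral mentioned above happens in the inline helpers of
include/linux/skbuff.h. The sketch below paraphrases the 2.6.x-era dispatch
and is not part of this patch (the exact header code may differ slightly):
pskb_trim() does nothing unless the skb actually shrinks, and the slow path
___pskb_trim() patched below is only entered when skb->data_len is non-zero,
i.e. when paged frags or a frag_list are present.

/* Paraphrased sketch of the callers of ___pskb_trim() -- not this patch. */
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {		/* purely linear: trim in place */
		skb->len  = len;
		skb->tail = skb->data + len;
		return 0;
	}
	return ___pskb_trim(skb, len);	/* paged frags or frag_list present */
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}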
Another problem caused by this is that you can end up with a linear packet
with a frag_list attached.
It is possible to get away with this if we audit everything to make sure
that they always consult skb->len before going down onto frag_list. In
fact we can do the same thing for the paged part as well to avoid copying
the data area of the skb. For now though, let's do the conservative fix
and update frag_list.
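To make the invariant concrete, here is an illustrative helper (the name
skb_recount_len is hypothetical; this is not kernel code or part of the
patch) that recomputes the length an skb should advertise. Before this fix,
a deferred trim shrank skb->len without making the frag_list part of this
sum agree with it.

/* Illustrative only -- not kernel code.  An skb's length is the sum of its
 * linear head, its paged fragments and every skb chained on frag_list;
 * skb->data_len holds the non-linear share.  A correct trim must leave all
 * three parts consistent with the new skb->len.
 */
static unsigned int skb_recount_len(struct sk_buff *skb)
{
	unsigned int len = skb_headlen(skb);		/* linear data */
	struct sk_buff *frag;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		len += skb_shinfo(skb)->frags[i].size;	/* paged frags */

	for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
		len += frag->len;			/* chained skbs */

	return len;	/* should always equal skb->len */
}

With this patch, the second loop added to ___pskb_trim() below walks the
frag_list, trims the skb that straddles the new length and drops everything
after it, so the sum above matches the new skb->len again.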
Many thanks to Marco Berizzi for helping me to track down this bug.
This 4-year-old bug took 3 months to track down. Marco was very patient
indeed :)
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/core/skbuff.c | 91
1 file changed, 65 insertions(+), 26 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..476aa3978504 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,11 +257,11 @@ nodata:
 }
 
 
-static void skb_drop_fraglist(struct sk_buff *skb)
+static void skb_drop_list(struct sk_buff **listp)
 {
-	struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	struct sk_buff *list = *listp;
 
-	skb_shinfo(skb)->frag_list = NULL;
+	*listp = NULL;
 
 	do {
 		struct sk_buff *this = list;
@@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
 	} while (list);
 }
 
+static inline void skb_drop_fraglist(struct sk_buff *skb)
+{
+	skb_drop_list(&skb_shinfo(skb)->frag_list);
+}
+
 static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
@@ -830,41 +835,75 @@ free_skb:
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 {
+	struct sk_buff **fragp;
+	struct sk_buff *frag;
 	int offset = skb_headlen(skb);
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int i;
+	int err;
+
+	if (skb_cloned(skb) &&
+	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+		return err;
 
 	for (i = 0; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
-		if (end > len) {
-			if (skb_cloned(skb)) {
-				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-					return -ENOMEM;
-			}
-			if (len <= offset) {
-				put_page(skb_shinfo(skb)->frags[i].page);
-				skb_shinfo(skb)->nr_frags--;
-			} else {
-				skb_shinfo(skb)->frags[i].size = len - offset;
-			}
+
+		if (end < len) {
+			offset = end;
+			continue;
 		}
-		offset = end;
+
+		if (len > offset)
+			skb_shinfo(skb)->frags[i++].size = len - offset;
+
+		skb_shinfo(skb)->nr_frags = i;
+
+		for (; i < nfrags; i++)
+			put_page(skb_shinfo(skb)->frags[i].page);
+
+		if (skb_shinfo(skb)->frag_list)
+			skb_drop_fraglist(skb);
+		break;
 	}
 
-	if (offset < len) {
+	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
+	     fragp = &frag->next) {
+		int end = offset + frag->len;
+
+		if (skb_shared(frag)) {
+			struct sk_buff *nfrag;
+
+			nfrag = skb_clone(frag, GFP_ATOMIC);
+			if (unlikely(!nfrag))
+				return -ENOMEM;
+
+			nfrag->next = frag->next;
+			frag = nfrag;
+			*fragp = frag;
+		}
+
+		if (end < len) {
+			offset = end;
+			continue;
+		}
+
+		if (end > len &&
+		    unlikely((err = pskb_trim(frag, len - offset))))
+			return err;
+
+		if (frag->next)
+			skb_drop_list(&frag->next);
+		break;
+	}
+
+	if (len > skb_headlen(skb)) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
 	} else {
-		if (len <= skb_headlen(skb)) {
-			skb->len = len;
-			skb->data_len = 0;
-			skb->tail = skb->data + len;
-			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
-				skb_drop_fraglist(skb);
-		} else {
-			skb->data_len -= skb->len - len;
-			skb->len = len;
-		}
+		skb->len = len;
+		skb->data_len = 0;
+		skb->tail = skb->data + len;
 	}
 
 	return 0;