Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 132
1 file changed, 98 insertions, 34 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..c54f3664bce5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,13 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 /*
- * lockdep: lock class key used by skb_queue_head_init():
- */
-struct lock_class_key skb_queue_lock_key;
-
-EXPORT_SYMBOL(skb_queue_lock_key);
-
-/*
  * Keep out-of-line to prevent kernel bloat.
  * __builtin_return_address is not used because it is not always
  * reliable.
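The hunk above only removes the global lock class key that, per the deleted comment, skb_queue_head_init() had been using to classify skb queue spinlocks for lockdep; the queue API itself is unchanged. For reference, a minimal sketch (not part of this diff; the my_dev_* names are hypothetical) of how such a queue is initialised and used:

#include <linux/skbuff.h>

struct my_dev_priv {				/* hypothetical driver state */
	struct sk_buff_head rxq;
};

static void my_dev_queue_init(struct my_dev_priv *priv)
{
	/* Initialises the list head and the queue spinlock -- the lock the
	 * removed skb_queue_lock_key used to annotate for lockdep.
	 */
	skb_queue_head_init(&priv->rxq);
}

static void my_dev_rx_enqueue(struct my_dev_priv *priv, struct sk_buff *skb)
{
	skb_queue_tail(&priv->rxq, skb);	/* takes the queue lock internally */
}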
@@ -256,12 +249,37 @@ nodata:
 	goto out;
 }
 
+/**
+ *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *	@gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
 
-static void skb_drop_fraglist(struct sk_buff *skb)
+	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
+	return skb;
+}
+
+static void skb_drop_list(struct sk_buff **listp)
 {
-	struct sk_buff *list = skb_shinfo(skb)->frag_list;
+	struct sk_buff *list = *listp;
 
-	skb_shinfo(skb)->frag_list = NULL;
+	*listp = NULL;
 
 	do {
 		struct sk_buff *this = list;
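A hedged sketch of how a driver RX path might use the new helper: __netdev_alloc_skb() already reserves NET_SKB_PAD bytes of headroom internally, so callers only add whatever extra headroom they need themselves. The my_driver_* name and the NET_IP_ALIGN reserve are illustrative, not part of this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *my_driver_rx_alloc(struct net_device *dev,
					  unsigned int frame_len)
{
	struct sk_buff *skb;

	/* NET_SKB_PAD is added inside __netdev_alloc_skb(); request only the
	 * frame plus the driver's own alignment headroom.
	 */
	skb = __netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Shift the data pointer so the IP header lands on a 4-byte boundary
	 * after the 14-byte Ethernet header.
	 */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}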
@@ -270,6 +288,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
 	} while (list);
 }
 
+static inline void skb_drop_fraglist(struct sk_buff *skb)
+{
+	skb_drop_list(&skb_shinfo(skb)->frag_list);
+}
+
 static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
@@ -830,41 +853,81 @@ free_skb:
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 {
+	struct sk_buff **fragp;
+	struct sk_buff *frag;
 	int offset = skb_headlen(skb);
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int i;
+	int err;
 
-	for (i = 0; i < nfrags; i++) {
+	if (skb_cloned(skb) &&
+	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+		return err;
+
+	i = 0;
+	if (offset >= len)
+		goto drop_pages;
+
+	for (; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
-		if (end > len) {
-			if (skb_cloned(skb)) {
-				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-					return -ENOMEM;
-			}
-			if (len <= offset) {
-				put_page(skb_shinfo(skb)->frags[i].page);
-				skb_shinfo(skb)->nr_frags--;
-			} else {
-				skb_shinfo(skb)->frags[i].size = len - offset;
-			}
+
+		if (end < len) {
+			offset = end;
+			continue;
 		}
-		offset = end;
+
+		skb_shinfo(skb)->frags[i++].size = len - offset;
+
+drop_pages:
+		skb_shinfo(skb)->nr_frags = i;
+
+		for (; i < nfrags; i++)
+			put_page(skb_shinfo(skb)->frags[i].page);
+
+		if (skb_shinfo(skb)->frag_list)
+			skb_drop_fraglist(skb);
+		goto done;
 	}
 
-	if (offset < len) {
+	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
+	     fragp = &frag->next) {
+		int end = offset + frag->len;
+
+		if (skb_shared(frag)) {
+			struct sk_buff *nfrag;
+
+			nfrag = skb_clone(frag, GFP_ATOMIC);
+			if (unlikely(!nfrag))
+				return -ENOMEM;
+
+			nfrag->next = frag->next;
+			kfree_skb(frag);
+			frag = nfrag;
+			*fragp = frag;
+		}
+
+		if (end < len) {
+			offset = end;
+			continue;
+		}
+
+		if (end > len &&
+		    unlikely((err = pskb_trim(frag, len - offset))))
+			return err;
+
+		if (frag->next)
+			skb_drop_list(&frag->next);
+		break;
+	}
+
+done:
+	if (len > skb_headlen(skb)) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
 	} else {
-		if (len <= skb_headlen(skb)) {
-			skb->len = len;
-			skb->data_len = 0;
-			skb->tail = skb->data + len;
-			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
-				skb_drop_fraglist(skb);
-		} else {
-			skb->data_len -= skb->len - len;
-			skb->len = len;
-		}
+		skb->len = len;
+		skb->data_len = 0;
+		skb->tail = skb->data + len;
 	}
 
 	return 0;
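In kernels of this vintage ___pskb_trim() is normally reached through the pskb_trim()/__pskb_trim() inlines, which fall back to this slow path when the skb carries paged or fragmented data; with the rewrite above it also walks the frag_list itself, cloning shared list members before trimming them. A hedged caller sketch (the function name and trailer handling are illustrative, not from this patch):

#include <linux/skbuff.h>

/* Strip a fixed-size trailer (e.g. a hardware-appended FCS) from a
 * possibly non-linear skb.  pskb_trim() may have to reallocate the header
 * of a cloned skb or clone shared frag_list members, so it can fail
 * with -ENOMEM.
 */
static int my_strip_trailer(struct sk_buff *skb, unsigned int trailer_len)
{
	if (skb->len <= trailer_len)
		return -EINVAL;

	return pskb_trim(skb, skb->len - trailer_len);
}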
@@ -2003,6 +2066,7 @@ EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
+EXPORT_SYMBOL(__netdev_alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);