author		Steven Whitehouse <swhiteho@redhat.com>	2006-08-07 09:16:35 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-08-07 09:16:35 -0400
commit		ad73c67e792c752ddc99f2b0587abae05255dd6d (patch)
tree		e33d5d37a8c56b6fad753fa2d481f43cfb60ae1d /net/core/skbuff.c
parent		08eac93a689ca05c7b7413f6d362c7d38b5fd5b1 (diff)
parent		9f737633e6ee54fc174282d49b2559bd2208391d (diff)
Merge branch 'master'
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c | 45
1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 476aa3978504..022d8894c11d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,13 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 /*
- * lockdep: lock class key used by skb_queue_head_init():
- */
-struct lock_class_key skb_queue_lock_key;
-
-EXPORT_SYMBOL(skb_queue_lock_key);
-
-/*
  * Keep out-of-line to prevent kernel bloat.
  * __builtin_return_address is not used because it is not always
  * reliable.
@@ -256,6 +249,29 @@ nodata:
 	goto out;
 }
 
+/**
+ * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	if (likely(skb))
+		skb_reserve(skb, NET_SKB_PAD);
+	return skb;
+}
 
 static void skb_drop_list(struct sk_buff **listp)
 {
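The kernel-doc above spells out the calling convention: __netdev_alloc_skb() reserves NET_SKB_PAD bytes of built-in headroom, so callers reserve only whatever extra they need. As a rough driver-side sketch (not part of this commit; RX_BUF_LEN and my_rx_refill are hypothetical names, and NET_IP_ALIGN is the usual extra alignment drivers add, defined outside this diff), an RX refill path might look like:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_BUF_LEN 1536		/* assumed receive buffer size */

static struct sk_buff *my_rx_refill(struct net_device *dev)
{
	struct sk_buff *skb;

	/* GFP_ATOMIC: RX refill commonly runs in softirq context. */
	skb = __netdev_alloc_skb(dev, RX_BUF_LEN + NET_IP_ALIGN, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* no free memory, as documented above */

	/* NET_SKB_PAD is already reserved inside __netdev_alloc_skb();
	 * only the extra IP-header alignment is reserved here. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}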
@@ -846,7 +862,11 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
 		return err;
 
-	for (i = 0; i < nfrags; i++) {
+	i = 0;
+	if (offset >= len)
+		goto drop_pages;
+
+	for (; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
 
 		if (end < len) {
@@ -854,9 +874,9 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			continue;
 		}
 
-		if (len > offset)
-			skb_shinfo(skb)->frags[i++].size = len - offset;
+		skb_shinfo(skb)->frags[i++].size = len - offset;
 
+drop_pages:
 		skb_shinfo(skb)->nr_frags = i;
 
 		for (; i < nfrags; i++)
@@ -864,7 +884,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 
 		if (skb_shinfo(skb)->frag_list)
 			skb_drop_fraglist(skb);
-		break;
+		goto done;
 	}
 
 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
@@ -879,6 +899,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 				return -ENOMEM;
 
 			nfrag->next = frag->next;
+			kfree_skb(frag);
 			frag = nfrag;
 			*fragp = frag;
 		}
@@ -897,6 +918,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 		break;
 	}
 
+done:
 	if (len > skb_headlen(skb)) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
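For context, ___pskb_trim() is normally reached through the pskb_trim() wrapper declared in <linux/skbuff.h>; that wrapper is not part of this diff, so the sketch below is only illustrative (my_cap_packet is a hypothetical name). It shows the error handling a caller needs, since trimming a nonlinear skb can fail with -ENOMEM:

#include <linux/skbuff.h>

/* Hypothetical caller: cap a packet at max_len bytes. */
static int my_cap_packet(struct sk_buff *skb, unsigned int max_len)
{
	if (skb->len <= max_len)
		return 0;	/* nothing to trim */

	/* pskb_trim() ends up in ___pskb_trim() for nonlinear skbs and
	 * may return -ENOMEM if a cloned head must be reallocated first. */
	return pskb_trim(skb, max_len);
}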
@@ -2042,6 +2064,7 @@ EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
+EXPORT_SYMBOL(__netdev_alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);