Diffstat (limited to 'net/core/skbuff.c')
 net/core/skbuff.c | 73 ++++++++++++++++++++++++++++++++++++++++---------------------------------
 1 file changed, 40 insertions(+), 33 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46a3d23d259e..368f65c15e4f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -160,8 +160,8 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  * @node: numa node to allocate memory on
  *
  * Allocate a new &sk_buff. The returned buffer has no headroom and a
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
+ * tail room of at least size bytes. The object has a reference count
+ * of one. The return is the buffer. On a failure the return is %NULL.
  *
  * Buffers may only be allocated from interrupts using a @gfp_mask of
  * %GFP_ATOMIC.
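The reworded kernel-doc tracks what __alloc_skb() actually does: the slab may round the allocation up, and the skb claims the whole object as tail room. A paraphrased sketch of that sizing logic (simplified; see __alloc_skb() in this file for the real code):

        /* Paraphrased from __alloc_skb(): kmalloc may return more than
         * was asked for, and ksize() reports the real usable size, so
         * the tail room ends up being "at least" size bytes.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_node_track_caller(size, gfp_mask, node);
        if (!data)
                goto nodata;
        /* put skb_shared_info exactly at the end of the allocated zone */
        size = SKB_WITH_OVERHEAD(ksize(data));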
@@ -296,9 +296,12 @@ EXPORT_SYMBOL(build_skb);
 struct netdev_alloc_cache {
 	struct page *page;
 	unsigned int offset;
+	unsigned int pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
 
+#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+
 /**
  * netdev_alloc_frag - allocate a page fragment
  * @fragsz: fragment size
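The new pagecnt_bias field and NETDEV_PAGECNT_BIAS constant set up a per-page hand-out budget. As used by __netdev_alloc_skb(), every fragment size is SKB_DATA_ALIGN()ed, i.e. a multiple of SMP_CACHE_BYTES, so one page can yield at most PAGE_SIZE / SMP_CACHE_BYTES fragments; on a typical x86-64 build (4096-byte pages, 64-byte cache lines) that bound is 64. Seeding page->_count with that value up front lets each hand-out be accounted with a plain pagecnt_bias decrement instead of a get_page() per fragment.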
@@ -317,17 +320,26 @@ void *netdev_alloc_frag(unsigned int fragsz)
 	if (unlikely(!nc->page)) {
 refill:
 		nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		if (unlikely(!nc->page))
+			goto end;
+recycle:
+		atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
+		nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
 		nc->offset = 0;
 	}
-	if (likely(nc->page)) {
-		if (nc->offset + fragsz > PAGE_SIZE) {
-			put_page(nc->page);
-			goto refill;
-		}
-		data = page_address(nc->page) + nc->offset;
-		nc->offset += fragsz;
-		get_page(nc->page);
+
+	if (nc->offset + fragsz > PAGE_SIZE) {
+		/* avoid unnecessary locked operations if possible */
+		if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
+		    atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
+			goto recycle;
+		goto refill;
 	}
+
+	data = page_address(nc->page) + nc->offset;
+	nc->offset += fragsz;
+	nc->pagecnt_bias--;
+end:
 	local_irq_restore(flags);
 	return data;
 }
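The rewrite removes the get_page()/put_page() atomic pair per fragment: a hand-out is now a plain pagecnt_bias decrement, and the reference count is settled only when the page is exhausted, either with a lock-free equality check or a single atomic_sub_and_test(). A minimal userspace analogue of the counting scheme, using C11 atomics in place of page->_count; the names (frag_cache, cache_take, and so on) are illustrative, not kernel API, and the page itself is elided:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define BIAS 64                 /* stands in for NETDEV_PAGECNT_BIAS */

        struct frag_cache {
                atomic_int refcount;    /* stands in for page->_count */
                unsigned int bias;      /* unconsumed hand-out budget */
        };

        static void cache_refill(struct frag_cache *c)
        {
                /* Seed the shared count with the whole budget up front. */
                atomic_store(&c->refcount, BIAS);
                c->bias = BIAS;
        }

        /* Hand out one fragment: a plain decrement, no atomic op. */
        static void cache_take(struct frag_cache *c)
        {
                c->bias--;
        }

        /* Consumer drops its fragment reference (put_page() analogue). */
        static void cache_put(struct frag_cache *c)
        {
                atomic_fetch_sub(&c->refcount, 1);
        }

        /* On exhaustion: may the backing page be reused in place?
         * On true, the caller re-seeds via cache_refill() (the kernel's
         * "goto recycle"); on false it must get a fresh page. */
        static bool cache_try_recycle(struct frag_cache *c)
        {
                /* Fast path: every hand-out already came back, so the
                 * count equals the remaining budget; skip the atomic. */
                if (atomic_load(&c->refcount) == (int)c->bias)
                        return true;
                /* Settle the unused budget in one atomic op; a zero
                 * result means no outstanding references remain. */
                return atomic_fetch_sub(&c->refcount, c->bias) == (int)c->bias;
        }

        int main(void)
        {
                struct frag_cache c;

                cache_refill(&c);
                cache_take(&c);         /* one fragment in flight */
                cache_put(&c);          /* consumer freed it */
                printf("recyclable: %d\n", cache_try_recycle(&c));
                return 0;
        }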
@@ -353,7 +365,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
 		void *data = netdev_alloc_frag(fragsz);
 
 		if (likely(data)) {
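The extra GFP_DMA test matters because the per-CPU fragment cache is refilled with plain alloc_page(GFP_ATOMIC | __GFP_COLD) and so never holds ZONE_DMA pages; a caller asking for GFP_DMA must be routed to the fallback, which passes gfp_mask through. The surrounding shape of __netdev_alloc_skb() is roughly the following (paraphrased, declarations elided):

        if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data = netdev_alloc_frag(fragsz);

                if (likely(data)) {
                        skb = build_skb(data, fragsz);
                        if (unlikely(!skb))
                                put_page(virt_to_head_page(data));
                }
        } else {
                /* honors gfp_mask, including the GFP_DMA zone modifier */
                skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0,
                                  NUMA_NO_NODE);
        }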
@@ -713,7 +725,8 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+/**
+ * skb_copy_ubufs - copy userspace skb frags buffers to kernel
  * @skb: the skb to modify
  * @gfp_mask: allocation priority
  *
@@ -738,7 +751,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		u8 *vaddr;
 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
-		page = alloc_page(GFP_ATOMIC);
+		page = alloc_page(gfp_mask);
 		if (!page) {
 			while (head) {
 				struct page *next = (struct page *)head->private;
@@ -756,22 +769,22 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	}
 
 	/* skb frags release userspace buffers */
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+	for (i = 0; i < num_frags; i++)
 		skb_frag_unref(skb, i);
 
 	uarg->callback(uarg);
 
 	/* skb frags point to kernel buffers */
-	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
-		__skb_fill_page_desc(skb, i-1, head, 0,
-				     skb_shinfo(skb)->frags[i - 1].size);
+	for (i = num_frags - 1; i >= 0; i--) {
+		__skb_fill_page_desc(skb, i, head, 0,
+				     skb_shinfo(skb)->frags[i].size);
 		head = (struct page *)head->private;
 	}
 
 	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	return 0;
 }
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
 
 /**
  * skb_clone - duplicate an sk_buff
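Two related changes here: the loops now index a num_frags value presumably sampled once at function entry (not visible in this hunk), which keeps the count stable while the frag descriptors are rewritten and turns the second loop into a plain downward index; and the new EXPORT_SYMBOL_GPL makes skb_copy_ubufs() callable from modules such as zero-copy drivers. The presumed context at the top of the function, paraphrased:

        /* Presumed declarations in skb_copy_ubufs(), outside this hunk:
         * the frag count is read once and reused by both loops below.
         */
        int i;
        int num_frags = skb_shinfo(skb)->nr_frags;
        struct page *page, *head = NULL;
        struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;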
@@ -791,10 +804,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	struct sk_buff *n;
 
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-		if (skb_copy_ubufs(skb, gfp_mask))
-			return NULL;
-	}
+	if (skb_orphan_frags(skb, gfp_mask))
+		return NULL;
 
 	n = skb + 1;
 	if (skb->fclone == SKB_FCLONE_ORIG &&
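This and the two hunks below convert all three zerocopy call sites (skb_clone(), __pskb_copy(), pskb_expand_head()) to one helper, folding the SKBTX_DEV_ZEROCOPY test together with the skb_copy_ubufs() call. A sketch of that helper as a static inline; it is not part of this file, and in mainline of this era it lives in include/linux/skbuff.h:

        /* Sketch of the skb_orphan_frags() helper the call sites now use. */
        static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
        {
                /* Common case: no userspace (zerocopy) frags, nothing to do. */
                if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                        return 0;
                /* Otherwise take private kernel copies of the user pages. */
                return skb_copy_ubufs(skb, gfp_mask);
        }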
@@ -914,12 +925,10 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->nr_frags) {
 		int i;
 
-		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-			if (skb_copy_ubufs(skb, gfp_mask)) {
-				kfree_skb(n);
-				n = NULL;
-				goto out;
-			}
+		if (skb_orphan_frags(skb, gfp_mask)) {
+			kfree_skb(n);
+			n = NULL;
+			goto out;
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -992,10 +1001,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	 */
 	if (skb_cloned(skb)) {
 		/* copy this zero copy skb frags */
-		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-			if (skb_copy_ubufs(skb, gfp_mask))
-				goto nofrags;
-		}
+		if (skb_orphan_frags(skb, gfp_mask))
+			goto nofrags;
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 			skb_frag_ref(skb, i);
 
@@ -2614,7 +2621,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 EXPORT_SYMBOL(skb_find_text);
 
 /**
- * skb_append_datato_frags: - append the user data to a skb
+ * skb_append_datato_frags - append the user data to a skb
  * @sk: sock structure
  * @skb: skb structure to be appened with user data.
  * @getfrag: call back function to be used for getting the user data