Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  |  84
1 file changed, 83 insertions(+), 1 deletion(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46cbd28f40f9..2beda824636e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -329,6 +329,18 @@ static void skb_release_data(struct sk_buff *skb)
 				put_page(skb_shinfo(skb)->frags[i].page);
 		}
 
+		/*
+		 * If the skb's buffers came from userspace, notify the caller
+		 * that the lower device has finished the DMA.
+		 */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			struct ubuf_info *uarg;
+
+			uarg = skb_shinfo(skb)->destructor_arg;
+			if (uarg->callback)
+				uarg->callback(uarg);
+		}
+
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
 
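
The callback fired here is whatever the zerocopy submitter (for example a vhost-net style transmitter) stored in skb_shinfo(skb)->destructor_arg. A minimal sketch of what such a completion callback could look like, assuming only the ubuf_info/callback contract visible in this hunk; my_zc_request, my_zc_callback and the completion field are illustrative names, not part of this patch:

#include <linux/skbuff.h>
#include <linux/completion.h>

/* Hypothetical zerocopy request state; the skb's destructor_arg points at
 * the embedded ubuf_info. All names here are illustrative only. */
struct my_zc_request {
	struct ubuf_info ubuf;		/* callback set to my_zc_callback */
	struct completion dma_done;	/* signalled once the device is done */
};

/* Completion callback invoked from skb_release_data() above when the last
 * reference to the skb data is dropped, i.e. the lower device has finished
 * DMA on the pinned userspace pages. */
static void my_zc_callback(struct ubuf_info *ubuf)
{
	struct my_zc_request *req = container_of(ubuf, struct my_zc_request, ubuf);

	/* The userspace buffers may now be unpinned and reused. */
	complete(&req->dma_done);
}
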
@@ -481,6 +493,9 @@ bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 	if (irqs_disabled())
 		return false;
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
 		return false;
 
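
skb_recycle_check() is called by drivers that want to reuse a just-freed skb as the next receive buffer; an skb whose frags still point at pinned userspace pages must instead take the normal free path so that the callback above fires. A hedged driver-side illustration, where my_try_recycle and bufsize are invented names and not part of this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver refill helper (illustrative only): reuse the skb if
 * skb_recycle_check() accepts it, otherwise free it normally so that
 * skb_release_data() runs and notifies the zerocopy owner. */
static struct sk_buff *my_try_recycle(struct sk_buff *skb, int bufsize)
{
	if (skb_recycle_check(skb, bufsize))
		return skb;		/* reset and safe to reuse for RX */

	dev_kfree_skb_any(skb);
	return NULL;			/* caller must allocate a fresh skb */
}
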
@@ -596,6 +611,51 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
+/* Copy the userspace buffers referenced by the skb's frags into kernel pages. */
+static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+{
+	int i;
+	int num_frags = skb_shinfo(skb)->nr_frags;
+	struct page *page, *head = NULL;
+	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
+
+	for (i = 0; i < num_frags; i++) {
+		u8 *vaddr;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			while (head) {
+				struct page *next = (struct page *)head->private;
+				put_page(head);
+				head = next;
+			}
+			return -ENOMEM;
+		}
+		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		memcpy(page_address(page),
+		       vaddr + f->page_offset, f->size);
+		kunmap_skb_frag(vaddr);
+		page->private = (unsigned long)head;
+		head = page;
+	}
+
+	/* Drop the references to the userspace buffer pages. */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		put_page(skb_shinfo(skb)->frags[i].page);
+
+	uarg->callback(uarg);
+
+	/* Repoint the frags at the kernel pages chained via page->private. */
+	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
+		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
+		skb_shinfo(skb)->frags[i - 1].page = head;
+		head = (struct page *)head->private;
+	}
+	return 0;
+}
+
+
 /**
  *	skb_clone	-	duplicate an sk_buff
  *	@skb: buffer to clone
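
skb_copy_ubufs() is the fallback for code paths that are about to take extra references on the frag pages: it allocates kernel pages, copies the userspace data into them, drops the userspace page references, notifies the owner through uarg->callback(), and leaves the frags pointing at the kernel pages (chained through page->private while being built). The call sites added below in skb_clone(), pskb_copy() and pskb_expand_head() all open-code the same check-copy-clear sequence; summarized as a hypothetical helper (my_orphan_zerocopy_frags is an invented name, not something this patch adds), the repeated pattern is:

/* Hypothetical helper summarizing the pattern repeated at the call sites
 * below; this patch open-codes it rather than adding such a function. */
static int my_orphan_zerocopy_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))
		return 0;		/* frags already point at kernel pages */

	/* Replace userspace pages with freshly allocated kernel copies and
	 * notify the zerocopy owner; on failure the skb is left untouched. */
	if (skb_copy_ubufs(skb, gfp_mask))
		return -ENOMEM;

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
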
@@ -614,6 +674,12 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	struct sk_buff *n;
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		if (skb_copy_ubufs(skb, gfp_mask))
+			return NULL;
+		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+	}
+
 	n = skb + 1;
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
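
For callers of skb_clone() this means a clone of a zerocopy skb never shares the pinned userspace pages: the copy is made up front and the flag is cleared, and an allocation failure inside skb_copy_ubufs() surfaces as a NULL return. A caller-side sketch, with my_clone_and_queue as an invented name:

/* Hypothetical caller: cloning a possibly-zerocopy skb (illustrative only). */
static int my_clone_and_queue(struct sk_buff *skb, struct sk_buff_head *queue)
{
	struct sk_buff *clone;

	/* For SKBTX_DEV_ZEROCOPY skbs this first copies the user frags into
	 * kernel pages (skb_copy_ubufs), so NULL can also mean -ENOMEM there. */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return -ENOMEM;

	skb_queue_tail(queue, clone);
	return 0;
}
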
@@ -731,6 +797,14 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->nr_frags) {
 		int i;
 
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			if (skb_copy_ubufs(skb, gfp_mask)) {
+				kfree_skb(n);
+				n = NULL;
+				goto out;
+			}
+			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
 			get_page(skb_shinfo(n)->frags[i].page);
@@ -788,7 +862,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		fastpath = true;
 	else {
 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-
 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
 	}
 
@@ -819,6 +892,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (fastpath) {
 		kfree(skb->head);
 	} else {
+		/* Copy the frags of this zerocopy skb before taking page refs. */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			if (skb_copy_ubufs(skb, gfp_mask))
+				goto nofrags;
+			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 			get_page(skb_shinfo(skb)->frags[i].page);
 
@@ -853,6 +932,8 @@ adjust_others:
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
 	return 0;
 
+nofrags:
+	kfree(data);
 nodata:
 	return -ENOMEM;
 }
@@ -1354,6 +1435,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 		}
 		start = end;
 	}
+
 	if (!len)
 		return 0;
 
