author:    Eric Dumazet <edumazet@google.com>  2012-12-28 01:06:37 -0500
committer: David S. Miller <davem@davemloft.net>  2012-12-28 18:25:19 -0500
commit:    b2111724a639ec31a19fdca62ea3a0a222d59d11
tree:      0d707599721ae209b176feab8ce41b7e63191e78 /net/core/skbuff.c
parent:    210ab6656fa8c49d7238c13f85ed551ebab94fb0
net: use per task frag allocator in skb_append_datato_frags
Use the new per task frag allocator in skb_append_datato_frags(),
to reduce number of frags and page allocator overhead.

Tested:
 ifconfig lo mtu 16436
 perf record netperf -t UDP_STREAM ; perf report

before :
 Throughput: 32928 Mbit/s
    51.79%  netperf  [kernel.kallsyms]  [k] copy_user_generic_string
     5.98%  netperf  [kernel.kallsyms]  [k] __alloc_pages_nodemask
     5.58%  netperf  [kernel.kallsyms]  [k] get_page_from_freelist
     5.01%  netperf  [kernel.kallsyms]  [k] __rmqueue
     3.74%  netperf  [kernel.kallsyms]  [k] skb_append_datato_frags
     1.87%  netperf  [kernel.kallsyms]  [k] prep_new_page
     1.42%  netperf  [kernel.kallsyms]  [k] next_zones_zonelist
     1.28%  netperf  [kernel.kallsyms]  [k] __inc_zone_state
     1.26%  netperf  [kernel.kallsyms]  [k] alloc_pages_current
     0.78%  netperf  [kernel.kallsyms]  [k] sock_alloc_send_pskb
     0.74%  netperf  [kernel.kallsyms]  [k] udp_sendmsg
     0.72%  netperf  [kernel.kallsyms]  [k] zone_watermark_ok
     0.68%  netperf  [kernel.kallsyms]  [k] __cpuset_node_allowed_softwall
     0.67%  netperf  [kernel.kallsyms]  [k] fib_table_lookup
     0.60%  netperf  [kernel.kallsyms]  [k] memcpy_fromiovecend
     0.55%  netperf  [kernel.kallsyms]  [k] __udp4_lib_lookup

after:
 Throughput: 47185 Mbit/s
    61.74%  netperf  [kernel.kallsyms]  [k] copy_user_generic_string
     2.07%  netperf  [kernel.kallsyms]  [k] prep_new_page
     1.98%  netperf  [kernel.kallsyms]  [k] skb_append_datato_frags
     1.02%  netperf  [kernel.kallsyms]  [k] sock_alloc_send_pskb
     0.97%  netperf  [kernel.kallsyms]  [k] enqueue_task_fair
     0.97%  netperf  [kernel.kallsyms]  [k] udp_sendmsg
     0.91%  netperf  [kernel.kallsyms]  [k] __ip_route_output_key
     0.88%  netperf  [kernel.kallsyms]  [k] __netif_receive_skb
     0.87%  netperf  [kernel.kallsyms]  [k] fib_table_lookup
     0.85%  netperf  [kernel.kallsyms]  [k] resched_task
     0.78%  netperf  [kernel.kallsyms]  [k] __udp4_lib_lookup
     0.77%  netperf  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
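The patch relies on current->task_frag and sk_page_frag_refill(), which come
from the earlier per-task frag allocator work and are not shown in this diff.
As a rough guide to the contract the new loop assumes, here is a simplified
sketch in the spirit of the net/core/sock.c implementation of that era (the
real function also tries high-order pages first and handles memory pressure;
page_frag_refill_sketch is a hypothetical name):

	/* Sketch only, not the exact kernel code: ensure the task's
	 * page_frag has usable space, touching the page allocator only
	 * when the current page is exhausted and no longer shared.
	 */
	static bool page_frag_refill_sketch(struct sock *sk, struct page_frag *pfrag)
	{
		if (pfrag->page) {
			/* sole remaining user: recycle the page in place */
			if (atomic_read(&pfrag->page->_count) == 1) {
				pfrag->offset = 0;
				return true;
			}
			/* room left for another frag: no allocation at all */
			if (pfrag->offset < pfrag->size)
				return true;
			put_page(pfrag->page);
		}
		pfrag->page = alloc_page(sk->sk_allocation);
		if (!pfrag->page)
			return false;
		pfrag->offset = 0;
		pfrag->size = PAGE_SIZE;
		return true;
	}

Each frag carved out of the shared page takes its own reference (the
get_page() in the diff below), so the page goes back to the allocator only
when the last skb using it is freed; the common case costs a couple of
comparisons instead of an alloc_pages() call per frag.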
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  43
1 file changed, 16 insertions(+), 27 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b0de42..ec8737ec59b5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2686,48 +2686,37 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 					int len, int odd, struct sk_buff *skb),
 					void *from, int length)
 {
-	int frg_cnt = 0;
-	skb_frag_t *frag = NULL;
-	struct page *page = NULL;
-	int copy, left;
+	int frg_cnt = skb_shinfo(skb)->nr_frags;
+	int copy;
 	int offset = 0;
 	int ret;
+	struct page_frag *pfrag = &current->task_frag;
 
 	do {
 		/* Return error if we don't have space for new frag */
-		frg_cnt = skb_shinfo(skb)->nr_frags;
 		if (frg_cnt >= MAX_SKB_FRAGS)
-			return -EFAULT;
-
-		/* allocate a new page for next frag */
-		page = alloc_pages(sk->sk_allocation, 0);
+			return -EMSGSIZE;
 
-		/* If alloc_page fails just return failure and caller will
-		 * free previous allocated pages by doing kfree_skb()
-		 */
-		if (page == NULL)
+		if (!sk_page_frag_refill(sk, pfrag))
 			return -ENOMEM;
 
-		/* initialize the next frag */
-		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
-		skb->truesize += PAGE_SIZE;
-		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
-
-		/* get the new initialized frag */
-		frg_cnt = skb_shinfo(skb)->nr_frags;
-		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
-
 		/* copy the user data to page */
-		left = PAGE_SIZE - frag->page_offset;
-		copy = (length > left)? left : length;
+		copy = min_t(int, length, pfrag->size - pfrag->offset);
 
-		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
-			    offset, copy, 0, skb);
+		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
+			      offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
 
 		/* copy was successful so update the size parameters */
-		skb_frag_size_add(frag, copy);
+		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
+				   copy);
+		frg_cnt++;
+		pfrag->offset += copy;
+		get_page(pfrag->page);
+
+		skb->truesize += copy;
+		atomic_add(copy, &sk->sk_wmem_alloc);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
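To make the gain concrete: with lo at mtu 16436, the old loop allocated a
fresh page for every frag, so a single 16436-byte datagram cost five page
allocations and five frags (4 x 4096 + 52 bytes), each charged a full
PAGE_SIZE to skb->truesize and sk_wmem_alloc. The new loop charges only the
bytes actually copied, and when sk_page_frag_refill() hands back its
preferred 32KB chunk the whole datagram fits in one frag. A hypothetical
userspace sketch of the pattern (refcounting and fallbacks omitted):

	/* Hypothetical userspace illustration, not kernel code: carve
	 * variable-sized chunks out of one reusable buffer and call the
	 * allocator only on exhaustion, instead of once per chunk.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define FRAG_SIZE 32768	/* the kernel prefers 32KB chunks when it can */

	struct frag { char *buf; size_t offset, size; };

	static int frag_refill(struct frag *f)
	{
		if (f->buf && f->offset < f->size)
			return 1;	/* fast path: room left, no allocator call */
		free(f->buf);
		f->buf = malloc(FRAG_SIZE);
		if (!f->buf)
			return 0;
		f->offset = 0;
		f->size = FRAG_SIZE;
		return 1;
	}

	int main(void)
	{
		struct frag f = { NULL, 0, 0 };
		size_t length = 16436;	/* one jumbo-MTU datagram, as in the test */
		int frags = 0, allocs = 0;

		while (length > 0) {
			size_t copy;

			if (!frag_refill(&f))
				return 1;
			if (f.offset == 0)
				allocs++;	/* buffer was just (re)filled */
			copy = length < f.size - f.offset ? length : f.size - f.offset;
			memset(f.buf + f.offset, 0, copy);	/* stand-in for getfrag() */
			f.offset += copy;
			length -= copy;
			frags++;
		}
		printf("%d frag(s), %d allocation(s)\n", frags, allocs);
		free(f.buf);
		return 0;
	}

This prints "1 frag(s), 1 allocation(s)"; the removed per-page scheme would
have needed five of each for the same 16436 bytes.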