diff options
author | Eric Dumazet <edumazet@google.com> | 2012-05-18 23:02:02 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-05-19 18:34:57 -0400 |
commit | bad43ca8325f493dcaa0896c2f036276af059c7e (patch) | |
tree | ee27a3b3eeef928f22cd500a32a23e4db60a5584 /net/ipv4 | |
parent | 3dde25988292864a582b4a9389b1ae835aa3fe80 (diff) |
net: introduce skb_try_coalesce()
Move tcp_try_coalesce() protocol independent part to
skb_try_coalesce().
skb_try_coalesce() can be used in IPv4 defrag and IPv6 reassembly,
to build optimized skbs (less sk_buff, and possibly less 'headers')
skb_try_coalesce() is zero copy, unless the copy can fit in the destination
header (it's a rare case)
kfree_skb_partial() is also moved to net/core/skbuff.c and exported,
because IPv6 will need it in patch (ipv6: use skb coalescing in
reassembly).
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/tcp_input.c | 67 |
1 file changed, 3 insertions, 64 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b961ef54b17d..cfa2aa128342 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4549,84 +4549,23 @@ static bool tcp_try_coalesce(struct sock *sk, | |||
4549 | struct sk_buff *from, | 4549 | struct sk_buff *from, |
4550 | bool *fragstolen) | 4550 | bool *fragstolen) |
4551 | { | 4551 | { |
4552 | int i, delta, len = from->len; | 4552 | int delta; |
4553 | 4553 | ||
4554 | *fragstolen = false; | 4554 | *fragstolen = false; |
4555 | 4555 | ||
4556 | if (tcp_hdr(from)->fin || skb_cloned(to)) | 4556 | if (tcp_hdr(from)->fin) |
4557 | return false; | 4557 | return false; |
4558 | 4558 | if (!skb_try_coalesce(to, from, fragstolen, &delta)) | |
4559 | if (len <= skb_tailroom(to)) { | ||
4560 | BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); | ||
4561 | goto merge; | ||
4562 | } | ||
4563 | |||
4564 | if (skb_has_frag_list(to) || skb_has_frag_list(from)) | ||
4565 | return false; | 4559 | return false; |
4566 | 4560 | ||
4567 | if (skb_headlen(from) != 0) { | ||
4568 | struct page *page; | ||
4569 | unsigned int offset; | ||
4570 | |||
4571 | if (skb_shinfo(to)->nr_frags + | ||
4572 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) | ||
4573 | return false; | ||
4574 | |||
4575 | if (skb_head_is_locked(from)) | ||
4576 | return false; | ||
4577 | |||
4578 | delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); | ||
4579 | |||
4580 | page = virt_to_head_page(from->head); | ||
4581 | offset = from->data - (unsigned char *)page_address(page); | ||
4582 | |||
4583 | skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, | ||
4584 | page, offset, skb_headlen(from)); | ||
4585 | *fragstolen = true; | ||
4586 | } else { | ||
4587 | if (skb_shinfo(to)->nr_frags + | ||
4588 | skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) | ||
4589 | return false; | ||
4590 | |||
4591 | delta = from->truesize - | ||
4592 | SKB_TRUESIZE(skb_end_pointer(from) - from->head); | ||
4593 | } | ||
4594 | |||
4595 | WARN_ON_ONCE(delta < len); | ||
4596 | |||
4597 | memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, | ||
4598 | skb_shinfo(from)->frags, | ||
4599 | skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); | ||
4600 | skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; | ||
4601 | |||
4602 | if (!skb_cloned(from)) | ||
4603 | skb_shinfo(from)->nr_frags = 0; | ||
4604 | |||
4605 | /* if the skb is cloned this does nothing since we set nr_frags to 0 */ | ||
4606 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) | ||
4607 | skb_frag_ref(from, i); | ||
4608 | |||
4609 | to->truesize += delta; | ||
4610 | atomic_add(delta, &sk->sk_rmem_alloc); | 4561 | atomic_add(delta, &sk->sk_rmem_alloc); |
4611 | sk_mem_charge(sk, delta); | 4562 | sk_mem_charge(sk, delta); |
4612 | to->len += len; | ||
4613 | to->data_len += len; | ||
4614 | |||
4615 | merge: | ||
4616 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); | 4563 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); |
4617 | TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; | 4564 | TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; |
4618 | TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; | 4565 | TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; |
4619 | return true; | 4566 | return true; |
4620 | } | 4567 | } |
4621 | 4568 | ||
4622 | static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) | ||
4623 | { | ||
4624 | if (head_stolen) | ||
4625 | kmem_cache_free(skbuff_head_cache, skb); | ||
4626 | else | ||
4627 | __kfree_skb(skb); | ||
4628 | } | ||
4629 | |||
4630 | static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) | 4569 | static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) |
4631 | { | 4570 | { |
4632 | struct tcp_sock *tp = tcp_sk(sk); | 4571 | struct tcp_sock *tp = tcp_sk(sk); |