aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/skbuff.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2012-05-18 23:02:02 -0400
committerDavid S. Miller <davem@davemloft.net>2012-05-19 18:34:57 -0400
commitbad43ca8325f493dcaa0896c2f036276af059c7e (patch)
treeee27a3b3eeef928f22cd500a32a23e4db60a5584 /net/core/skbuff.c
parent3dde25988292864a582b4a9389b1ae835aa3fe80 (diff)
net: introduce skb_try_coalesce()
Move tcp_try_coalesce() protocol independent part to skb_try_coalesce(). skb_try_coalesce() can be used in IPv4 defrag and IPv6 reassembly, to build optimized skbs (less sk_buff, and possibly less 'headers') skb_try_coalesce() is zero copy, unless the copy can fit in destination header (it's a rare case) kfree_skb_partial() is also moved to net/core/skbuff.c and exported, because IPv6 will need it in patch (ipv6: use skb coalescing in reassembly). Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--net/core/skbuff.c86
1 files changed, 86 insertions, 0 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7ceb673d622f..016694d62484 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3346,3 +3346,89 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3346 skb->dev->name); 3346 skb->dev->name);
3347} 3347}
3348EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3348EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3349
3350void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
3351{
3352 if (head_stolen)
3353 kmem_cache_free(skbuff_head_cache, skb);
3354 else
3355 __kfree_skb(skb);
3356}
3357EXPORT_SYMBOL(kfree_skb_partial);
3358
/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: set to true if @from's head was stolen and turned into a
 *		page frag of @to (caller must then use kfree_skb_partial())
 * @delta_truesize: on success, how much @to's truesize grew
 *
 * Returns true if @from's payload was absorbed into @to, false if the
 * buffers cannot be coalesced (caller keeps both skbs untouched).
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	/* A cloned @to shares its data/frag array; we must not append to it. */
	if (skb_cloned(to))
		return false;

	/* Fast path: @from fits into @to's tailroom, so just copy the
	 * bytes; @to's truesize does not change.
	 */
	if (len <= skb_tailroom(to)) {
		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		/* @from's linear head becomes one extra frag of @to,
		 * hence ">=": we need room for from's frags plus one.
		 */
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		/* The sk_buff struct itself will be freed; its head page
		 * lives on as a frag of @to.
		 */
		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		/* No head to steal: only @from's frags move over, so @to
		 * grows by @from's truesize minus its (freed) head area.
		 */
		delta = from->truesize -
			SKB_TRUESIZE(skb_end_pointer(from) - from->head);
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	/* @from keeps sole ownership of its frag references only when it
	 * is not cloned; a clone still shares them.
	 */
	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);