author     Andy Fleming <afleming@freescale.com>      2011-10-13 00:33:54 -0400
committer  David S. Miller <davem@davemloft.net>      2011-10-19 15:59:45 -0400
commit     3d153a7c8b23031df35e61377c0600a6c96a8b0b (patch)
tree       f5c7f622331728ba12f40c2ebb66666ce3d924fd /net/core/skbuff.c
parent     1e5c22cde3b85737921d3ec6ecf2c356e5b64ea7 (diff)
net: Allow skb_recycle_check to be done in stages
skb_recycle_check resets the skb if it's eligible for recycling. However, there are times when a driver might want to manipulate the data in the skb after it has determined eligibility, but before resetting the skb. We do this by splitting the eligibility check from the skb reset, creating two functions to accomplish that task.

Signed-off-by: Andy Fleming <afleming@freescale.com>
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
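A minimal sketch of the driver-side pattern this split enables is shown below. struct my_priv, my_copy_break() and my_refill_rx() are hypothetical driver helpers used only for illustration; skb_is_recycleable() and skb_recycle() are the two functions produced by this change.

/* Sketch of the staged recycling pattern enabled by this commit.
 * struct my_priv, my_copy_break() and my_refill_rx() are hypothetical
 * driver helpers; only skb_is_recycleable() and skb_recycle() come
 * from the split of skb_recycle_check().
 */
static void my_driver_handle_rx(struct my_priv *priv, struct sk_buff *skb,
				int len)
{
	if (skb_is_recycleable(skb, priv->rx_buffer_size)) {
		/* Eligibility is known but the payload is still intact,
		 * so the driver can still copy the frame out for the
		 * stack before touching the buffer...
		 */
		my_copy_break(priv, skb, len);

		/* ...and only then reset it as if it had just come from
		 * __alloc_skb() and put it back on the receive ring.
		 */
		skb_recycle(skb);
		my_refill_rx(priv, skb);
	} else {
		skb_put(skb, len);
		netif_receive_skb(skb);
	}
}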
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  |  51
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ce357d986251..e27104039a39 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -485,6 +485,30 @@ void consume_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(consume_skb);
 
 /**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
  * skb_recycle_check - check if skb can be reused for receive
  * @skb: buffer
  * @skb_size: minimum receive buffer size
@@ -498,33 +522,10 @@ EXPORT_SYMBOL(consume_skb);
  */
 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
-	struct skb_shared_info *shinfo;
-
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return false;
-
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
+	if (!skb_is_recycleable(skb, skb_size))
 		return false;
 
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
+	skb_recycle(skb);
 
 	return true;
 }
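The skb_is_recycleable() check called above is added to include/linux/skbuff.h by the same commit and does not appear in this file's diff; the sketch below reconstructs it from the tests removed from skb_recycle_check() in the hunk above.

/* Reconstructed from the checks removed above; the actual helper is an
 * inline function in include/linux/skbuff.h, not part of this diff.
 */
static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	/* Recycling touches reference counts, so not from IRQ context. */
	if (irqs_disabled())
		return false;

	/* Zero-copy buffers still belong to userspace. */
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	/* Only plain, linear, non-fclone skbs can be reset in place. */
	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	/* The existing buffer must be big enough for the receive size. */
	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	/* Shared or cloned skbs have other users of the data. */
	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}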