author	Eric Dumazet <edumazet@google.com>	2012-10-05 02:23:55 -0400
committer	David S. Miller <davem@davemloft.net>	2012-10-07 00:40:54 -0400
commit	acb600def2110b1310466c0e485c0d26299898ae (patch)
tree	21036c7d0518601aba70dde0246ac229cd8dfc0c /net/core
parent	809d5fc9bf6589276a12bd4fd611e4c7ff9940c3 (diff)
net: remove skb recycling
Over time, the skb recycling infrastructure attracted little interest and many bugs. Generic RX path skb allocation now uses page fragments for efficient GRO / TCP coalescing, and recycling a TX skb for the RX path is not worth the pain.

The last identified bug is that fat skbs can be recycled, ending up using high-order pages after a few iterations.

Thanks to Maxime Bizon, who pointed out that commit 87151b8689d (net: allow pskb_expand_head() to get maximum tailroom) introduced this regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling. Drivers wanting really hot skbs should use build_skb() anyway, to allocate and populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
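As a rough illustration of the replacement the message points to, a driver's RX completion path built around build_skb() might look like the sketch below. Only build_skb(), skb_reserve(), skb_put(), eth_type_trans(), netif_receive_skb() and NET_SKB_PAD are real kernel APIs here; the function name and the dev/data/frag_size/len parameters are illustrative assumptions, not taken from this commit.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical RX completion handler: wrap an already DMA-filled
 * buffer in a fresh sk_buff right before handing it to the stack,
 * instead of recycling an old one.
 */
static void my_driver_rx_one(struct net_device *dev, void *data,
			     unsigned int frag_size, unsigned int len)
{
	struct sk_buff *skb;

	/* frag_size must include room for struct skb_shared_info
	 * at the end of the buffer.
	 */
	skb = build_skb(data, frag_size);
	if (unlikely(!skb))
		return;

	skb_reserve(skb, NET_SKB_PAD);	/* headroom the DMA left free */
	skb_put(skb, len);		/* bytes written by the NIC */
	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);
}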
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/skbuff.c	47
1 file changed, 0 insertions, 47 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cdc28598f4ef..6e04b1fa11f2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -655,53 +655,6 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
-/**
- * skb_recycle - clean up an skb for reuse
- * @skb: buffer
- *
- * Recycles the skb to be reused as a receive buffer. This
- * function does any necessary reference count dropping, and
- * cleans up the skbuff as if it just came from __alloc_skb().
- */
-void skb_recycle(struct sk_buff *skb)
-{
-	struct skb_shared_info *shinfo;
-
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
-}
-EXPORT_SYMBOL(skb_recycle);
-
-/**
- * skb_recycle_check - check if skb can be reused for receive
- * @skb: buffer
- * @skb_size: minimum receive buffer size
- *
- * Checks that the skb passed in is not shared or cloned, and
- * that it is linear and its head portion at least as large as
- * skb_size so that it can be recycled as a receive buffer.
- * If these conditions are met, this function does any necessary
- * reference count dropping and cleans up the skbuff as if it
- * just came from __alloc_skb().
- */
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
-{
-	if (!skb_is_recycleable(skb, skb_size))
-		return false;
-
-	skb_recycle(skb);
-
-	return true;
-}
-EXPORT_SYMBOL(skb_recycle_check);
-
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	new->tstamp = old->tstamp;
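For contrast, a pre-commit caller of the API removed above typically sat in a driver's TX-completion path, along the lines of the sketch below. skb_recycle_check() (now removed) and dev_kfree_skb_any() are real; "struct my_priv", "rx_buf_size" and the requeue helper are hypothetical names for illustration.

#include <linux/skbuff.h>

/* Pattern made obsolete by this commit: on TX completion, try to
 * turn the finished skb into the next RX buffer instead of freeing
 * it.
 */
static void my_tx_complete(struct my_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buf_size))
		my_requeue_rx_buffer(priv, skb);  /* reuse as RX buffer */
	else
		dev_kfree_skb_any(skb);           /* after this commit: always free */
}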