diff options
author | Changli Gao <xiaosuo@gmail.com> | 2010-11-29 17:48:46 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-12-03 13:59:47 -0500 |
commit | ca44ac386181ba710a9ab6db900d6c1e5451b366 (patch) | |
tree | ab71f5c91ddc162ba4f570cbd5e47049a6c2e94e /net/core/skbuff.c | |
parent | 289700dbc40c78741f17e2304ed4ac0db3c3afd3 (diff) |
net: don't reallocate skb->head unless the current one doesn't have the needed extra size or is shared
Since the skb head is allocated by kmalloc(), it might be larger than what
was actually requested because of the discrete kmem cache sizes. Before
allocating a new skb head, check whether the current one already has the
needed extra size.
Do this check only if skb head is not shared.
Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 34 |
1 files changed, 23 insertions, 11 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 104f8444754a..8814a9a52f47 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -778,6 +778,28 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
778 | 778 | ||
779 | size = SKB_DATA_ALIGN(size); | 779 | size = SKB_DATA_ALIGN(size); |
780 | 780 | ||
781 | /* Check if we can avoid taking references on fragments if we own | ||
782 | * the last reference on skb->head. (see skb_release_data()) | ||
783 | */ | ||
784 | if (!skb->cloned) | ||
785 | fastpath = true; | ||
786 | else { | ||
787 | int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1; | ||
788 | |||
789 | fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta; | ||
790 | } | ||
791 | |||
792 | if (fastpath && | ||
793 | size + sizeof(struct skb_shared_info) <= ksize(skb->head)) { | ||
794 | memmove(skb->head + size, skb_shinfo(skb), | ||
795 | offsetof(struct skb_shared_info, | ||
796 | frags[skb_shinfo(skb)->nr_frags])); | ||
797 | memmove(skb->head + nhead, skb->head, | ||
798 | skb_tail_pointer(skb) - skb->head); | ||
799 | off = nhead; | ||
800 | goto adjust_others; | ||
801 | } | ||
802 | |||
781 | data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); | 803 | data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); |
782 | if (!data) | 804 | if (!data) |
783 | goto nodata; | 805 | goto nodata; |
@@ -791,17 +813,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
791 | skb_shinfo(skb), | 813 | skb_shinfo(skb), |
792 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); | 814 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
793 | 815 | ||
794 | /* Check if we can avoid taking references on fragments if we own | ||
795 | * the last reference on skb->head. (see skb_release_data()) | ||
796 | */ | ||
797 | if (!skb->cloned) | ||
798 | fastpath = true; | ||
799 | else { | ||
800 | int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1; | ||
801 | |||
802 | fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta; | ||
803 | } | ||
804 | |||
805 | if (fastpath) { | 816 | if (fastpath) { |
806 | kfree(skb->head); | 817 | kfree(skb->head); |
807 | } else { | 818 | } else { |
@@ -816,6 +827,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
816 | off = (data + nhead) - skb->head; | 827 | off = (data + nhead) - skb->head; |
817 | 828 | ||
818 | skb->head = data; | 829 | skb->head = data; |
830 | adjust_others: | ||
819 | skb->data += off; | 831 | skb->data += off; |
820 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 832 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
821 | skb->end = size; | 833 | skb->end = size; |