author     Alexander Duyck <alexander.h.duyck@intel.com>    2012-05-04 10:26:51 -0400
committer  David S. Miller <davem@davemloft.net>            2012-05-06 13:13:19 -0400
commit     3e24591a19bbda6fcb2cbe383b41b4ba794501bf (patch)
tree       57cb4a46c8b94397c45ac2e9c43440aae6043015 /net/core
parent     9202e31d4632d82bd713fbd7d3fd229c0cd5b9cf (diff)
skb: Drop "fastpath" variable for skb_cloned check in pskb_expand_head
Since there is now only one spot that actually uses "fastpath", there isn't
much point in carrying it. Instead we can just use a check for skb_cloned
to decide whether we can perform the fast-path free for the head or not.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
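
[Editor's note] For context, the check that replaces the open-coded dataref
arithmetic is the existing skb_cloned() helper. It is not part of this patch;
the sketch below is an approximate quote of how it reads in
include/linux/skbuff.h around this kernel version, so treat the exact form as
an assumption rather than the patched source:

/* Approximate sketch of skb_cloned() from include/linux/skbuff.h of this
 * era (not part of this patch): the head is still shared when the skb is
 * marked cloned and the header part of dataref is held more than once.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
        return skb->cloned &&
               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}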
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/skbuff.c | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4d085d454285..17e4b1e1bf2c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -932,7 +932,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
         u8 *data;
         int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
         long off;
-        bool fastpath;
 
         BUG_ON(nhead < 0);
 
@@ -941,16 +940,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 
         size = SKB_DATA_ALIGN(size);
 
-        /* Check if we can avoid taking references on fragments if we own
-         * the last reference on skb->head. (see skb_release_data())
-         */
-        if (!skb->cloned)
-                fastpath = true;
-        else {
-                int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-                fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
-        }
-
         data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                        gfp_mask);
         if (!data)
@@ -966,9 +955,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                skb_shinfo(skb),
                offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
-        if (fastpath) {
-                skb_free_head(skb);
-        } else {
+        /*
+         * if shinfo is shared we must drop the old head gracefully, but if it
+         * is not we can just drop the old head and let the existing refcount
+         * be since all we did is relocate the values
+         */
+        if (skb_cloned(skb)) {
                 /* copy this zero copy skb frags */
                 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                         if (skb_copy_ubufs(skb, gfp_mask))
@@ -981,6 +973,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                 skb_clone_fraglist(skb);
 
                 skb_release_data(skb);
+        } else {
+                skb_free_head(skb);
         }
         off = (data + nhead) - skb->head;
 
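
[Editor's note] As a usage note, pskb_expand_head() is normally reached from
callers that need more headroom or a private copy of the head before writing
to it. The caller below is a minimal, hypothetical sketch (the function name
and hdr_len parameter are illustrative, not taken from this patch):

#include <linux/skbuff.h>

/* Hypothetical caller: make sure hdr_len bytes of headroom exist, then push
 * the header. pskb_expand_head() reallocates skb->head and, after this
 * patch, uses skb_cloned() to decide whether the old head can simply be
 * freed or must be released via skb_release_data().
 */
static int example_push_header(struct sk_buff *skb, unsigned int hdr_len)
{
        if (skb_headroom(skb) < hdr_len &&
            pskb_expand_head(skb, hdr_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -ENOMEM;

        __skb_push(skb, hdr_len);
        return 0;
}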