| field | value | date |
|---|---|---|
| author | Eric Dumazet <edumazet@google.com> | 2014-10-01 18:27:15 -0400 |
| committer | David S. Miller <davem@davemloft.net> | 2014-10-01 21:27:23 -0400 |
| commit | ce1a4ea3f125863bfbcb1afb76590ee2b7b93fbf | |
| tree | 5efaaa29736600ab06f5c4ed27af855fb543de13 /net/core | |
| parent | e500f488c27659bb6f5d313b336621f3daa67701 | |
net: avoid one atomic operation in skb_clone()
Fast-clone cloning can actually avoid an atomic_inc(), if we
guarantee the prior clone_ref value is 1.
This requires a change in kfree_skbmem(): perform the
atomic_dec_and_test() on clone_ref before setting fclone to
SKB_FCLONE_UNAVAILABLE.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
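To make the invariant concrete, here is a minimal userspace sketch of the same pattern, written with C11 atomics rather than the kernel's atomic_t API. The names fclone_pair, fc_claim and fc_release are hypothetical and exist only for illustration; this is a sketch of the idea, not the kernel code itself.

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the fast-clone state machine. */
enum fc_state { FC_UNAVAILABLE, FC_CLONE };

struct fclone_pair {
	atomic_int ref;      /* plays the role of fclones->fclone_ref */
	enum fc_state state; /* plays the role of skb->fclone         */
};

/* Clone path (cf. skb_clone()): the slot can only be claimed while
 * state == FC_UNAVAILABLE, and the patch's invariant says ref must
 * be exactly 1 at that point. Storing the known final value avoids
 * the read-modify-write an increment would need.
 */
static void fc_claim(struct fclone_pair *p)
{
	p->state = FC_CLONE;
	atomic_store(&p->ref, 2); /* instead of atomic_fetch_add(&p->ref, 1) */
}

/* Free path (cf. kfree_skbmem()): decrement *before* publishing
 * FC_UNAVAILABLE. If the order were reversed, a concurrent
 * fc_claim() could observe FC_UNAVAILABLE and store ref = 2 before
 * our decrement lands, corrupting the count. And when we held the
 * last reference the object is freed, so rewriting state would be
 * wasted work anyway.
 */
static void fc_release(struct fclone_pair *p)
{
	if (atomic_fetch_sub(&p->ref, 1) == 1)
		free(p); /* we were the last reference */
	else
		p->state = FC_UNAVAILABLE;
}

int main(void)
{
	struct fclone_pair *p = malloc(sizeof(*p));

	atomic_init(&p->ref, 1);  /* the original holds one reference */
	p->state = FC_UNAVAILABLE;

	fc_claim(p);   /* clone: ref 1 -> 2, plain store, no RMW     */
	fc_release(p); /* drop clone: ref 2 -> 1, slot reopens       */
	fc_release(p); /* drop original: ref 1 -> 0, object is freed */
	return 0;
}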
Diffstat (limited to 'net/core')
| -rw-r--r-- | net/core/skbuff.c | 23 |
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a8cebb40699c..f77e64873caf 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -541,13 +541,20 @@ static void kfree_skbmem(struct sk_buff *skb)
 	case SKB_FCLONE_CLONE:
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
-		/* The clone portion is available for
-		 * fast-cloning again.
+		/* Warning : We must perform the atomic_dec_and_test() before
+		 * setting skb->fclone back to SKB_FCLONE_UNAVAILABLE, otherwise
+		 * skb_clone() could set clone_ref to 2 before our decrement.
+		 * Anyway, if we are going to free the structure, no need to
+		 * rewrite skb->fclone.
 		 */
-		skb->fclone = SKB_FCLONE_UNAVAILABLE;
-
-		if (atomic_dec_and_test(&fclones->fclone_ref))
+		if (atomic_dec_and_test(&fclones->fclone_ref)) {
 			kmem_cache_free(skbuff_fclone_cache, fclones);
+		} else {
+			/* The clone portion is available for
+			 * fast-cloning again.
+			 */
+			skb->fclone = SKB_FCLONE_UNAVAILABLE;
+		}
 		break;
 	}
 }
@@ -869,7 +876,11 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
 		n->fclone = SKB_FCLONE_CLONE;
-		atomic_inc(&fclones->fclone_ref);
+		/* As our fastclone was free, clone_ref must be 1 at this point.
+		 * We could use atomic_inc() here, but it is faster
+		 * to set the final value.
+		 */
+		atomic_set(&fclones->fclone_ref, 2);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
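A note on the design choice: atomic_set() is a plain store, while atomic_inc() is a locked read-modify-write on most architectures, so the clone fast path saves a full atomic operation. The store of 2 is only safe because of the invariant the first hunk preserves: whenever the clone slot is SKB_FCLONE_UNAVAILABLE, fclone_ref is exactly 1, and kfree_skbmem() now re-publishes SKB_FCLONE_UNAVAILABLE only after its decrement has completed.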
