author     Eric Dumazet <edumazet@google.com>      2014-09-29 16:29:15 -0400
committer  David S. Miller <davem@davemloft.net>   2014-10-01 16:34:25 -0400
commit     d0bf4a9e92b9a93ffeeacbd7b6cb83e0ee3dc2ef (patch)
tree       1ba29128caa07307632b6219c1692308ec89983a /net/core
parent     b248230c34970a6c1c17c591d63b464e8d2cfc33 (diff)
net: cleanup and document skb fclone layout
Let's use a proper structure to clearly document and implement skb fast
clones.

Then, we might more easily experiment with alternative layouts.

This patch adds a new skb_fclone_busy() helper, used by tcp and xfrm, to
stop leaking implementation details.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
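The structure and the skb_fclone_busy() helper themselves land in
include/linux/skbuff.h, which is outside this net/core-limited view. Below is
a sketch of the layout, reconstructed from the accessors used in the diff;
the helper body is a plausible reading of the log, not copied from the
header:

/* Layout implied by the diff: two skbs back to back plus their
 * shared refcount, replacing bare pointer arithmetic. */
struct sk_buff_fclones {
        struct sk_buff  skb1;           /* the original (SKB_FCLONE_ORIG) */
        struct sk_buff  skb2;           /* companion slot for the fast clone */
        atomic_t        fclone_ref;     /* users of skb1 + skb2 combined */
};

/* Plausible shape of the new helper: "busy" while the companion
 * clone is still out, so tcp/xfrm need not peek at the layout. */
static inline bool skb_fclone_busy(const struct sk_buff *skb)
{
        const struct sk_buff_fclones *fclones =
                container_of(skb, struct sk_buff_fclones, skb1);

        return skb->fclone == SKB_FCLONE_ORIG &&
               fclones->skb2.fclone != SKB_FCLONE_UNAVAILABLE;
}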
Diffstat (limited to 'net/core')
 net/core/skbuff.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4be570a4ab21..a8cebb40699c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,15 +257,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
 	if (flags & SKB_ALLOC_FCLONE) {
-		struct sk_buff *child = skb + 1;
-		atomic_t *fclone_ref = (atomic_t *) (child + 1);
+		struct sk_buff_fclones *fclones;
 
-		kmemcheck_annotate_bitfield(child, flags1);
+		fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(fclone_ref, 1);
+		atomic_set(&fclones->fclone_ref, 1);
 
-		child->fclone = SKB_FCLONE_UNAVAILABLE;
-		child->pfmemalloc = pfmemalloc;
+		fclones->skb2.fclone = SKB_FCLONE_UNAVAILABLE;
+		fclones->skb2.pfmemalloc = pfmemalloc;
 	}
 out:
 	return skb;
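The idiom doing the work in this hunk is container_of(): from a pointer to a
member, recover the enclosing structure, replacing the old `skb + 1` /
`child + 1` pointer arithmetic with something tied to a named layout. A
minimal userspace sketch (the macro below is a simplified stand-in for the
kernel's version):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of(): walk back from
 * a member pointer to the structure that contains it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pair {
        int first;
        int second;
};

int main(void)
{
        struct pair p = { 1, 2 };
        int *member = &p.second;

        /* Recover &p from &p.second, exactly as the patch recovers
         * struct sk_buff_fclones from its embedded skb1/skb2. */
        struct pair *whole = container_of(member, struct pair, second);

        printf("%d %d\n", whole->first, whole->second); /* prints: 1 2 */
        return 0;
}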
@@ -524,8 +525,7 @@ static void skb_release_data(struct sk_buff *skb)
  */
 static void kfree_skbmem(struct sk_buff *skb)
 {
-	struct sk_buff *other;
-	atomic_t *fclone_ref;
+	struct sk_buff_fclones *fclones;
 
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
@@ -533,22 +533,21 @@ static void kfree_skbmem(struct sk_buff *skb)
 		break;
 
 	case SKB_FCLONE_ORIG:
-		fclone_ref = (atomic_t *) (skb + 2);
-		if (atomic_dec_and_test(fclone_ref))
-			kmem_cache_free(skbuff_fclone_cache, skb);
+		fclones = container_of(skb, struct sk_buff_fclones, skb1);
+		if (atomic_dec_and_test(&fclones->fclone_ref))
+			kmem_cache_free(skbuff_fclone_cache, fclones);
 		break;
 
 	case SKB_FCLONE_CLONE:
-		fclone_ref = (atomic_t *) (skb + 1);
-		other = skb - 1;
+		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
 		/* The clone portion is available for
 		 * fast-cloning again.
 		 */
 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
 
-		if (atomic_dec_and_test(fclone_ref))
-			kmem_cache_free(skbuff_fclone_cache, other);
+		if (atomic_dec_and_test(&fclones->fclone_ref))
+			kmem_cache_free(skbuff_fclone_cache, fclones);
 		break;
 	}
 }
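Both free paths now hand the same `fclones` pointer back to the slab cache,
whichever of the two skbs is released last, where the old code had to
distinguish `skb` from `other = skb - 1`. A userspace sketch of that
shared-refcount protocol, with hypothetical names (pair_alloc, pair_clone,
pair_put) standing in for __alloc_skb(), skb_clone() and kfree_skbmem():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* One contiguous block, two buffer slots, one shared refcount --
 * the same shape as struct sk_buff_fclones. */
struct pair_buf {
        bool slot_busy[2];
        atomic_int ref;
};

static struct pair_buf *pair_alloc(void)        /* cf. __alloc_skb() */
{
        struct pair_buf *p = calloc(1, sizeof(*p));

        if (!p)
                abort();
        p->slot_busy[0] = true;
        atomic_store(&p->ref, 1);       /* one user: the original */
        return p;
}

static void pair_clone(struct pair_buf *p)      /* cf. skb_clone() */
{
        p->slot_busy[1] = true;
        atomic_fetch_add(&p->ref, 1);   /* second user: the clone */
}

static void pair_put(struct pair_buf *p, int i) /* cf. kfree_skbmem() */
{
        p->slot_busy[i] = false;        /* slot reusable for fast-cloning */
        if (atomic_fetch_sub(&p->ref, 1) == 1)
                free(p);                /* last reference frees the block */
}

int main(void)
{
        struct pair_buf *p = pair_alloc();

        pair_clone(p);          /* ref 1 -> 2 */
        pair_put(p, 1);         /* ref 2 -> 1, block survives */
        pair_put(p, 0);         /* ref 1 -> 0, block freed */
        return 0;
}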
@@ -859,17 +858,18 @@ EXPORT_SYMBOL_GPL(skb_copy_ubufs);
 
 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
-	struct sk_buff *n;
+	struct sk_buff_fclones *fclones = container_of(skb,
+						       struct sk_buff_fclones,
+						       skb1);
+	struct sk_buff *n = &fclones->skb2;
 
 	if (skb_orphan_frags(skb, gfp_mask))
 		return NULL;
 
-	n = skb + 1;
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
-		atomic_t *fclone_ref = (atomic_t *) (n + 1);
 		n->fclone = SKB_FCLONE_CLONE;
-		atomic_inc(fclone_ref);
+		atomic_inc(&fclones->fclone_ref);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
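Worth noting: `fclones` (and thus `n`) is now computed unconditionally, even
for skbs that were not allocated from skbuff_fclone_cache. That is safe
because container_of() is pure pointer arithmetic; `n` is only dereferenced
after `skb->fclone == SKB_FCLONE_ORIG` has confirmed the skb really is the
skb1 of an fclone pair, and the && short-circuits otherwise.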
@@ -3240,8 +3240,7 @@ void __init skb_init(void)
 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 					      NULL);
 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
-						(2*sizeof(struct sk_buff)) +
-						sizeof(atomic_t),
+						sizeof(struct sk_buff_fclones),
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
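The cache-size change is more than cosmetic: sizeof on the named structure
automatically covers any alignment padding the compiler inserts, which the
old hand-summed `(2*sizeof(struct sk_buff)) + sizeof(atomic_t)` silently
omitted. A standalone check with stand-in types (struct buf is not the real
sk_buff):

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct buf { char payload[56]; };       /* stand-in for struct sk_buff */

struct buf_pair {                       /* stand-in for sk_buff_fclones */
        struct buf b1;
        struct buf b2;
        atomic_int ref;
};

int main(void)
{
        /* The named struct is never smaller than the hand-added parts,
         * and may be larger once the compiler inserts tail padding. */
        assert(sizeof(struct buf_pair) >=
               2 * sizeof(struct buf) + sizeof(atomic_int));

        /* offsetof() is what container_of() uses in place of the old
         * "skb + 1" arithmetic to find the second buffer. */
        assert(offsetof(struct buf_pair, b2) == sizeof(struct buf));
        return 0;
}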