-rw-r--r--   include/linux/skbuff.h  | 25
-rw-r--r--   net/core/skbuff.c       | 41
-rw-r--r--   net/ipv4/tcp_output.c   |  5
-rw-r--r--   net/xfrm/xfrm_policy.c  |  4
4 files changed, 47 insertions(+), 28 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 262efdbc346b..d8f7d74d5a4d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -781,6 +781,31 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
 				     int *errcode,
 				     gfp_t gfp_mask);
 
+/* Layout of fast clones : [skb1][skb2][fclone_ref] */
+struct sk_buff_fclones {
+	struct sk_buff	skb1;
+
+	struct sk_buff	skb2;
+
+	atomic_t	fclone_ref;
+};
+
+/**
+ *	skb_fclone_busy - check if fclone is busy
+ *	@skb: buffer
+ *
+ * Returns true if skb is a fast clone, and its clone is not freed.
+ */
+static inline bool skb_fclone_busy(const struct sk_buff *skb)
+{
+	const struct sk_buff_fclones *fclones;
+
+	fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+	return skb->fclone == SKB_FCLONE_ORIG &&
+	       fclones->skb2.fclone == SKB_FCLONE_CLONE;
+}
+
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
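Note on the new helper: skb_fclone_busy() relies on container_of() to step from the embedded skb1 back to the enclosing sk_buff_fclones, and on short-circuit evaluation to stay safe when @skb is not a fast clone at all. A self-contained userspace sketch (toy stand-in types and values, not the kernel's) demonstrates both points:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { SKB_FCLONE_UNAVAILABLE, SKB_FCLONE_ORIG, SKB_FCLONE_CLONE };

struct toy_skb { int fclone; };

struct toy_fclones {
	struct toy_skb skb1;
	struct toy_skb skb2;
	int fclone_ref;
};

/* Same pointer arithmetic the kernel's container_of() performs. */
#define toy_container_of(ptr, type, member) \
	((const type *)((const char *)(ptr) - offsetof(type, member)))

static bool toy_fclone_busy(const struct toy_skb *skb)
{
	const struct toy_fclones *fclones =
		toy_container_of(skb, struct toy_fclones, skb1);

	/* Short-circuit: for a non-ORIG skb the (meaningless) fclones
	 * pointer is computed but never dereferenced.
	 */
	return skb->fclone == SKB_FCLONE_ORIG &&
	       fclones->skb2.fclone == SKB_FCLONE_CLONE;
}

int main(void)
{
	struct toy_fclones f = {
		.skb1 = { .fclone = SKB_FCLONE_ORIG },
		.skb2 = { .fclone = SKB_FCLONE_CLONE },
	};
	struct toy_skb lone = { .fclone = SKB_FCLONE_UNAVAILABLE };

	printf("orig busy: %d, plain skb busy: %d\n",
	       toy_fclone_busy(&f.skb1), toy_fclone_busy(&lone));
	return 0;
}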
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4be570a4ab21..a8cebb40699c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,15 +257,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
 	if (flags & SKB_ALLOC_FCLONE) {
-		struct sk_buff *child = skb + 1;
-		atomic_t *fclone_ref = (atomic_t *) (child + 1);
+		struct sk_buff_fclones *fclones;
 
-		kmemcheck_annotate_bitfield(child, flags1);
+		fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(fclone_ref, 1);
+		atomic_set(&fclones->fclone_ref, 1);
 
-		child->fclone = SKB_FCLONE_UNAVAILABLE;
-		child->pfmemalloc = pfmemalloc;
+		fclones->skb2.fclone = SKB_FCLONE_UNAVAILABLE;
+		fclones->skb2.pfmemalloc = pfmemalloc;
 	}
 out:
 	return skb;
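The hunk above replaces the old skb + 1 / child + 1 pointer arithmetic with named members. A userspace sketch with toy stand-in types (the payload size is made up; the real sk_buff size depends on kernel config) makes the [skb1][skb2][fclone_ref] layout visible:

#include <stddef.h>
#include <stdio.h>

struct toy_skb { char payload[224]; };	/* stand-in for struct sk_buff */

struct toy_fclones {
	struct toy_skb skb1;
	struct toy_skb skb2;
	int fclone_ref;
};

int main(void)
{
	/* skb2 sits immediately after skb1, which is why the old
	 * "skb + 1" arithmetic happened to work.
	 */
	printf("skb1@%zu skb2@%zu fclone_ref@%zu size=%zu\n",
	       offsetof(struct toy_fclones, skb1),
	       offsetof(struct toy_fclones, skb2),
	       offsetof(struct toy_fclones, fclone_ref),
	       sizeof(struct toy_fclones));
	return 0;
}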
@@ -524,8 +525,7 @@ static void skb_release_data(struct sk_buff *skb)
  */
 static void kfree_skbmem(struct sk_buff *skb)
 {
-	struct sk_buff	*other;
-	atomic_t	*fclone_ref;
+	struct sk_buff_fclones *fclones;
 
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
@@ -533,22 +533,21 @@ static void kfree_skbmem(struct sk_buff *skb)
 		break;
 
 	case SKB_FCLONE_ORIG:
-		fclone_ref = (atomic_t *) (skb + 2);
-		if (atomic_dec_and_test(fclone_ref))
-			kmem_cache_free(skbuff_fclone_cache, skb);
+		fclones = container_of(skb, struct sk_buff_fclones, skb1);
+		if (atomic_dec_and_test(&fclones->fclone_ref))
+			kmem_cache_free(skbuff_fclone_cache, fclones);
 		break;
 
 	case SKB_FCLONE_CLONE:
-		fclone_ref = (atomic_t *) (skb + 1);
-		other = skb - 1;
+		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
 		/* The clone portion is available for
 		 * fast-cloning again.
 		 */
 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
 
-		if (atomic_dec_and_test(fclone_ref))
-			kmem_cache_free(skbuff_fclone_cache, other);
+		if (atomic_dec_and_test(&fclones->fclone_ref))
+			kmem_cache_free(skbuff_fclone_cache, fclones);
 		break;
 	}
 }
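Both cases now free the same pointer: whichever skb drops the last reference releases the whole block. A toy single-threaded model of the fclone_ref lifecycle (plain ints and free() standing in for the kernel's atomics and kmem_cache_free()):

#include <stdio.h>
#include <stdlib.h>

struct toy_fclones { int fclone_ref; };

/* Stand-in for atomic_dec_and_test() + kmem_cache_free(). */
static void toy_put(struct toy_fclones *f, const char *who)
{
	if (--f->fclone_ref == 0) {
		printf("%s drops the last ref: block freed\n", who);
		free(f);
	} else {
		printf("%s drops a ref: block kept\n", who);
	}
}

int main(void)
{
	struct toy_fclones *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->fclone_ref = 1;	/* __alloc_skb(): ORIG alone owns the block */
	f->fclone_ref++;	/* skb_clone() fast path: now two owners    */

	toy_put(f, "clone");	/* the SKB_FCLONE_CLONE case above  */
	toy_put(f, "orig");	/* the SKB_FCLONE_ORIG case: frees  */
	return 0;
}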
@@ -859,17 +858,18 @@ EXPORT_SYMBOL_GPL(skb_copy_ubufs);
 
 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
-	struct sk_buff *n;
+	struct sk_buff_fclones *fclones = container_of(skb,
+						       struct sk_buff_fclones,
+						       skb1);
+	struct sk_buff *n = &fclones->skb2;
 
 	if (skb_orphan_frags(skb, gfp_mask))
 		return NULL;
 
-	n = skb + 1;
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
-		atomic_t *fclone_ref = (atomic_t *) (n + 1);
 		n->fclone = SKB_FCLONE_CLONE;
-		atomic_inc(fclone_ref);
+		atomic_inc(&fclones->fclone_ref);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
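From the caller's side the behavior is unchanged but easier to read: the first skb_clone() of an fclone-allocated skb hands out the adjacent skb2 and only bumps fclone_ref, with no allocation from skbuff_head_cache. A hedged kernel-style sketch of a hypothetical caller (not part of this patch; error handling trimmed to the essentials):

static int example_xmit_with_clone(struct sk_buff *skb)
{
	struct sk_buff *clone;

	/* Fast path: if @skb came from alloc_skb_fclone(), this clone
	 * is the adjacent skb2 plus an atomic_inc() of fclone_ref.
	 */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return -ENOMEM;

	/* ... hand @clone down to the device, keep @skb around for a
	 * possible retransmit ...
	 */
	kfree_skb(clone);	/* the sketch just drops it again */
	return 0;
}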
@@ -3240,8 +3240,7 @@ void __init skb_init(void)
 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 					      NULL);
 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
-						(2*sizeof(struct sk_buff)) +
-						sizeof(atomic_t),
+						sizeof(struct sk_buff_fclones),
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
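Sizing the slab as sizeof(struct sk_buff_fclones) also covers any padding the compiler inserts between the members, which the old hand-summed expression silently ignored. If one wanted a belt-and-braces guarantee, a compile-time check along these lines could sit in skb_init() (an illustrative assumption, not in the patch):

	/* The struct is at least as large as the old hand-computed
	 * size; padding can only make it bigger.
	 */
	BUILD_BUG_ON(sizeof(struct sk_buff_fclones) <
		     2 * sizeof(struct sk_buff) + sizeof(atomic_t));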
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ee567e9e98c3..8d4eac793700 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2110,10 +2110,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
-	const struct sk_buff *fclone = skb + 1;
-
-	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
-		     fclone->fclone == SKB_FCLONE_CLONE)) {
+	if (unlikely(skb_fclone_busy(skb))) {
 		NET_INC_STATS_BH(sock_net(sk),
 				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f623dca6ce30..4c4e457e7888 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1961,10 +1961,8 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
 	struct xfrm_policy *pol = xdst->pols[0];
 	struct xfrm_policy_queue *pq = &pol->polq;
-	const struct sk_buff *fclone = skb + 1;
 
-	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
-		     fclone->fclone == SKB_FCLONE_CLONE)) {
+	if (unlikely(skb_fclone_busy(skb))) {
 		kfree_skb(skb);
 		return 0;
 	}
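A closing note on the two converted call sites: the skbs reaching them are frequently not fast clones at all, and skb_fclone_busy() stays safe there because the SKB_FCLONE_ORIG test short-circuits before fclones->skb2 is ever read. Any future caller would follow the same shape, e.g. (hypothetical fragment):

	/* Back off while the clone of @skb may still be alive
	 * elsewhere in the stack.
	 */
	if (unlikely(skb_fclone_busy(skb)))
		return;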