Diffstat (limited to 'net/core/skbuff.c')
 net/core/skbuff.c | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 61059a05ec95..32e31c299631 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb)
 	case SKB_FCLONE_CLONE:
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
-		/* Warning : We must perform the atomic_dec_and_test() before
-		 * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
-		 * skb_clone() could set clone_ref to 2 before our decrement.
-		 * Anyway, if we are going to free the structure, no need to
-		 * rewrite skb->fclone.
+		/* The clone portion is available for
+		 * fast-cloning again.
 		 */
-		if (atomic_dec_and_test(&fclones->fclone_ref)) {
+		skb->fclone = SKB_FCLONE_FREE;
+
+		if (atomic_dec_and_test(&fclones->fclone_ref))
 			kmem_cache_free(skbuff_fclone_cache, fclones);
-		} else {
-			/* The clone portion is available for
-			 * fast-cloning again.
-			 */
-			skb->fclone = SKB_FCLONE_FREE;
-		}
 		break;
 	}
 }
@@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_FREE) {
 		n->fclone = SKB_FCLONE_CLONE;
-		/* As our fastclone was free, clone_ref must be 1 at this point.
-		 * We could use atomic_inc() here, but it is faster
-		 * to set the final value.
-		 */
-		atomic_set(&fclones->fclone_ref, 2);
+		atomic_inc(&fclones->fclone_ref);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
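Taken together, the two hunks above move the fclone lifecycle back to plain reference counting: skb_clone() takes a reference with atomic_inc() instead of assuming the count is exactly 1 and storing the final value 2, and kfree_skbmem() can mark the clone slot SKB_FCLONE_FREE before dropping its reference, since the ordering constraint described in the removed comment only existed because of the atomic_set(). Below is a minimal user-space model of that scheme using C11 atomics; struct fclones and the fclones_*() helpers here are hypothetical stand-ins for sk_buff_fclones and the kernel paths, not kernel code, and the single-threaded main() only walks the clone/free lifecycle.

/* User-space sketch of the fclone refcount scheme after this patch.
 * Illustration only: names mirror the kernel's sk_buff_fclones, but
 * this structure and these helpers are hypothetical.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

enum fclone_state { FCLONE_ORIG, FCLONE_CLONE, FCLONE_FREE };

struct fclones {
	enum fclone_state skb2_state;	/* companion clone slot */
	atomic_int fclone_ref;		/* users of this orig/clone pair */
};

static struct fclones *fclones_alloc(void)
{
	struct fclones *f = malloc(sizeof(*f));

	f->skb2_state = FCLONE_FREE;		/* clone slot unused */
	atomic_init(&f->fclone_ref, 1);		/* the original holds one ref */
	return f;
}

/* Fast-clone path: reuse the free companion slot and take a reference,
 * as skb_clone() does after this patch (atomic_inc, not atomic_set).
 */
static void fclones_clone(struct fclones *f)
{
	assert(f->skb2_state == FCLONE_FREE);
	f->skb2_state = FCLONE_CLONE;
	atomic_fetch_add(&f->fclone_ref, 1);
}

/* Free the clone: mark the slot reusable before dropping the ref,
 * mirroring the reordered kfree_skbmem() above.
 */
static void fclones_free_clone(struct fclones *f)
{
	f->skb2_state = FCLONE_FREE;
	if (atomic_fetch_sub(&f->fclone_ref, 1) == 1)
		free(f);
}

static void fclones_free_orig(struct fclones *f)
{
	if (atomic_fetch_sub(&f->fclone_ref, 1) == 1)
		free(f);
}

int main(void)
{
	struct fclones *f = fclones_alloc();

	fclones_clone(f);		/* ref: 1 -> 2 */
	fclones_free_clone(f);		/* ref: 2 -> 1, slot FREE again */
	fclones_clone(f);		/* slot reused, ref back to 2 */
	printf("ref = %d\n", atomic_load(&f->fclone_ref));
	fclones_free_clone(f);
	fclones_free_orig(f);		/* last ref drops, pair is freed */
	return 0;
}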
@@ -4070,15 +4059,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int thlen = 0;
 
-	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-		return tcp_hdrlen(skb) + shinfo->gso_size;
+	if (skb->encapsulation) {
+		thlen = skb_inner_transport_header(skb) -
+			skb_transport_header(skb);
 
+		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+			thlen += inner_tcp_hdrlen(skb);
+	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		thlen = tcp_hdrlen(skb);
+	}
 	/* UFO sets gso_size to the size of the fragmentation
 	 * payload, i.e. the size of the L4 (UDP) header is already
 	 * accounted for.
 	 */
-	return shinfo->gso_size;
+	return thlen + shinfo->gso_size;
 }
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
 
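Once the header lengths are known, the new seglen logic is simple arithmetic: the per-segment transport overhead is the gap between the outer and inner transport headers plus the inner TCP header for encapsulated skbs, just the TCP header otherwise, and nothing for UFO, whose gso_size already covers the UDP header. The sketch below restates that computation in user space; struct gso_info, gso_transport_seglen(), and all header sizes in main() are hypothetical, since the real function derives these values from the skb via skb_transport_header(), tcp_hdrlen(), and friends.

/* User-space sketch of the seglen computation above -- illustration
 * only, with the skb fields replaced by explicit parameters.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant sk_buff/GSO state. */
struct gso_info {
	bool encapsulation;		/* skb->encapsulation */
	bool gso_tcp;			/* gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6) */
	unsigned int outer_to_inner;	/* inner minus outer transport header */
	unsigned int tcp_hdrlen;	/* (inner) TCP header length */
	unsigned int gso_size;		/* payload bytes per segment */
};

static unsigned int gso_transport_seglen(const struct gso_info *i)
{
	unsigned int thlen = 0;

	if (i->encapsulation) {
		/* Tunnel headers between the outer and inner transport
		 * headers count against every segment, plus the inner
		 * TCP header for TCP GSO.
		 */
		thlen = i->outer_to_inner;
		if (i->gso_tcp)
			thlen += i->tcp_hdrlen;
	} else if (i->gso_tcp) {
		thlen = i->tcp_hdrlen;
	}
	/* For UFO, gso_size already includes the UDP header, so no
	 * header length is added here.
	 */
	return thlen + i->gso_size;
}

int main(void)
{
	/* Tunneled TCP segment with made-up header sizes. */
	struct gso_info tunneled = {
		.encapsulation = true, .gso_tcp = true,
		.outer_to_inner = 50, .tcp_hdrlen = 20, .gso_size = 1398,
	};
	/* Plain TCP with options (32-byte header), no encapsulation. */
	struct gso_info plain_tcp = {
		.gso_tcp = true, .tcp_hdrlen = 32, .gso_size = 1428,
	};

	printf("tunneled: %u\n", gso_transport_seglen(&tunneled));	/* 1468 */
	printf("plain:    %u\n", gso_transport_seglen(&plain_tcp));	/* 1460 */
	return 0;
}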