diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-12-03 16:39:53 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-12-04 13:20:39 -0500 |
commit | 117632e64d2a5f464e491fe221d7169a3814a77b (patch) | |
tree | 88f3a036305da54a62835d900553dda9bc846a8f /net | |
parent | c2e4e25afcc8ae1835a6100089f1f9fd3a362430 (diff) |
tcp: take care of misalignments
We discovered that TCP stack could retransmit misaligned skbs if a
malicious peer acknowledged sub MSS frame. This currently can happen
only if output interface is non-SG enabled: if SG is enabled, tcp
builds headless skbs (all payload is included in fragments), so the tcp
trimming process only removes parts of skb fragments, and headers stay
aligned.
Some arches can't handle misalignments, so force a head reallocation and
shrink headroom to MAX_TCP_HEADER.
Don't care about misalignments on x86 and PPC (or other arches setting
NET_IP_ALIGN to 0)
This patch introduces __pskb_copy() which can specify the headroom of
new head, and pskb_copy() becomes a wrapper on top of __pskb_copy()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/core/skbuff.c | 11 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 10 |
2 files changed, 15 insertions, 6 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 678ae4e783aa..fd3646209b65 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -840,8 +840,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) | |||
840 | EXPORT_SYMBOL(skb_copy); | 840 | EXPORT_SYMBOL(skb_copy); |
841 | 841 | ||
842 | /** | 842 | /** |
843 | * pskb_copy - create copy of an sk_buff with private head. | 843 | * __pskb_copy - create copy of an sk_buff with private head. |
844 | * @skb: buffer to copy | 844 | * @skb: buffer to copy |
845 | * @headroom: headroom of new skb | ||
845 | * @gfp_mask: allocation priority | 846 | * @gfp_mask: allocation priority |
846 | * | 847 | * |
847 | * Make a copy of both an &sk_buff and part of its data, located | 848 | * Make a copy of both an &sk_buff and part of its data, located |
@@ -852,16 +853,16 @@ EXPORT_SYMBOL(skb_copy); | |||
852 | * The returned buffer has a reference count of 1. | 853 | * The returned buffer has a reference count of 1. |
853 | */ | 854 | */ |
854 | 855 | ||
855 | struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) | 856 | struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) |
856 | { | 857 | { |
857 | unsigned int size = skb_end_pointer(skb) - skb->head; | 858 | unsigned int size = skb_headlen(skb) + headroom; |
858 | struct sk_buff *n = alloc_skb(size, gfp_mask); | 859 | struct sk_buff *n = alloc_skb(size, gfp_mask); |
859 | 860 | ||
860 | if (!n) | 861 | if (!n) |
861 | goto out; | 862 | goto out; |
862 | 863 | ||
863 | /* Set the data pointer */ | 864 | /* Set the data pointer */ |
864 | skb_reserve(n, skb_headroom(skb)); | 865 | skb_reserve(n, headroom); |
865 | /* Set the tail pointer and length */ | 866 | /* Set the tail pointer and length */ |
866 | skb_put(n, skb_headlen(skb)); | 867 | skb_put(n, skb_headlen(skb)); |
867 | /* Copy the bytes */ | 868 | /* Copy the bytes */ |
@@ -897,7 +898,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) | |||
897 | out: | 898 | out: |
898 | return n; | 899 | return n; |
899 | } | 900 | } |
900 | EXPORT_SYMBOL(pskb_copy); | 901 | EXPORT_SYMBOL(__pskb_copy); |
901 | 902 | ||
902 | /** | 903 | /** |
903 | * pskb_expand_head - reallocate header of &sk_buff | 904 | * pskb_expand_head - reallocate header of &sk_buff |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 58f69acd3d22..50788d67bdb7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2147,7 +2147,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
2147 | */ | 2147 | */ |
2148 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2148 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
2149 | 2149 | ||
2150 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | 2150 | /* make sure skb->data is aligned on arches that require it */ |
2151 | if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { | ||
2152 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, | ||
2153 | GFP_ATOMIC); | ||
2154 | err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : | ||
2155 | -ENOBUFS; | ||
2156 | } else { | ||
2157 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | ||
2158 | } | ||
2151 | 2159 | ||
2152 | if (err == 0) { | 2160 | if (err == 0) { |
2153 | /* Update global TCP statistics. */ | 2161 | /* Update global TCP statistics. */ |