author    Eric Dumazet <eric.dumazet@gmail.com>    2011-12-03 16:39:53 -0500
committer David S. Miller <davem@davemloft.net>    2011-12-04 13:20:39 -0500
commit    117632e64d2a5f464e491fe221d7169a3814a77b (patch)
tree      88f3a036305da54a62835d900553dda9bc846a8f
parent    c2e4e25afcc8ae1835a6100089f1f9fd3a362430 (diff)
tcp: take care of misalignments
We discovered that the TCP stack could retransmit misaligned skbs if a
malicious peer acknowledged a sub-MSS frame. Currently this can happen only
if the output interface is not SG-capable: if SG is enabled, TCP builds
headless skbs (all payload is carried in fragments), so the TCP trimming
process only removes parts of skb fragments and the header stays aligned.

Some arches can't handle misalignments, so force a head reallocation and
shrink the headroom to MAX_TCP_HEADER.

Don't care about misalignments on x86 and PPC (or other arches setting
NET_IP_ALIGN to 0).

This patch introduces __pskb_copy(), which lets the caller specify the
headroom of the new head; pskb_copy() becomes a wrapper on top of
__pskb_copy().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
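For orientation, here is a condensed sketch of how the retransmit path uses
the new helper, distilled from the tcp_output.c hunk below. The wrapping
function name is illustrative only, and tcp_transmit_skb() is the file-local
helper in net/ipv4/tcp_output.c rather than an exported API.

	/* Illustrative sketch only: condensed from the tcp_retransmit_skb()
	 * change in the diff below.
	 */
	static int tcp_xmit_maybe_realign(struct sock *sk, struct sk_buff *skb)
	{
		int err;

		/* On arches where NET_IP_ALIGN != 0, a trimmed skb can leave
		 * skb->data off a 4-byte boundary; copy it into a fresh skb
		 * whose headroom is exactly MAX_TCP_HEADER so the data is
		 * realigned before transmission.
		 */
		if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
			struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
							   GFP_ATOMIC);

			/* clone_it = 0: nskb is already a private copy */
			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
				     -ENOBUFS;
		} else {
			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
		}
		return err;
	}

Existing callers of pskb_copy() are unaffected: the new static inline simply
forwards to __pskb_copy(skb, skb_headroom(skb), gfp_mask), preserving the old
headroom behaviour.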
-rw-r--r--  include/linux/skbuff.h  | 11
-rw-r--r--  net/core/skbuff.c       | 11
-rw-r--r--  net/ipv4/tcp_output.c   | 10
3 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cec0657d0d32..12e6fed73f8e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -568,8 +568,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb,
 					gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
 				gfp_t priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-				 gfp_t gfp_mask);
+extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
+				   int headroom, gfp_t gfp_mask);
+
 extern int	       pskb_expand_head(struct sk_buff *skb,
 					int nhead, int ntail,
 					gfp_t gfp_mask);
@@ -1799,6 +1800,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
 				    frag->page_offset + offset, size, dir);
 }
 
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+					gfp_t gfp_mask)
+{
+	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
 /**
  *	skb_clone_writable - is the header of a clone writable
  *	@skb: buffer to check
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 678ae4e783aa..fd3646209b65 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -840,8 +840,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 EXPORT_SYMBOL(skb_copy);
 
 /**
- *	pskb_copy	-	create copy of an sk_buff with private head.
+ *	__pskb_copy	-	create copy of an sk_buff with private head.
  *	@skb: buffer to copy
+ *	@headroom: headroom of new skb
  *	@gfp_mask: allocation priority
  *
  *	Make a copy of both an &sk_buff and part of its data, located
@@ -852,16 +853,16 @@ EXPORT_SYMBOL(skb_copy);
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 {
-	unsigned int size = skb_end_pointer(skb) - skb->head;
+	unsigned int size = skb_headlen(skb) + headroom;
 	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 	if (!n)
 		goto out;
 
 	/* Set the data pointer */
-	skb_reserve(n, skb_headroom(skb));
+	skb_reserve(n, headroom);
 	/* Set the tail pointer and length */
 	skb_put(n, skb_headlen(skb));
 	/* Copy the bytes */
@@ -897,7 +898,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
 	return n;
 }
-EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(__pskb_copy);
 
 /**
  *	pskb_expand_head - reallocate header of &sk_buff
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 58f69acd3d22..50788d67bdb7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2147,7 +2147,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+	/* make sure skb->data is aligned on arches that require it */
+	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+						   GFP_ATOMIC);
+		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+			     -ENOBUFS;
+	} else {
+		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+	}
 
 	if (err == 0) {
 		/* Update global TCP statistics. */