author	Eric Dumazet <edumazet@google.com>	2012-12-01 08:07:02 -0500
committer	David S. Miller <davem@davemloft.net>	2012-12-01 20:39:16 -0500
commit	64022d0b4e93ea432e95db55a72b8a1c5775f3c0 (patch)
tree	ea2ac7d7a7ed903d645c21d99f02ff6fdbb6aa96 /net/ipv4/tcp.c
parent	9f8933e960f98d27742727445061b0ece934e5cf (diff)
tcp: fix crashes in do_tcp_sendpages()
Recent network changes allowed high-order pages to be used for skb fragments. This uncovered a bug in do_tcp_sendpages(), which was assuming its caller provided an array of order-0 page pointers. We only have to deal with a single page in this function, and its order is irrelevant.

Reported-by: Willy Tarreau <w@1wt.eu>
Tested-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
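For illustration only (not part of the commit): a minimal user-space sketch of the indexing assumption the patch removes. It assumes the crash scenario implied by the message and the diff below: tcp_sendpage() passes the address of a single page, and with a high-order (compound) page the starting offset can exceed PAGE_SIZE, so the old pages[poffset / PAGE_SIZE] lookup walks off the end of that one-element array. PAGE_SIZE and struct page here are stand-ins, not the kernel definitions.

#include <stdio.h>

#define PAGE_SIZE 4096u                 /* stand-in for the kernel constant */

struct page { int id; };                /* stand-in for struct page */

int main(void)
{
	struct page pg = { 0 };
	struct page *pages = &pg;       /* tcp_sendpage() passed &page: a one-element "array" */

	/* With a high-order page, the offset can exceed PAGE_SIZE. */
	unsigned int poffset = 2 * PAGE_SIZE + 100;

	/* Old do_tcp_sendpages() arithmetic: index 2 of a one-element array. */
	unsigned int idx = poffset / PAGE_SIZE;
	printf("old lookup would touch pages[%u]; only pages[0] exists\n", idx);

	/* New code keeps the single page and only tracks (offset, size). */
	printf("new lookup: page=%p offset=%u\n", (void *)&pages[0], poffset);
	return 0;
}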
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 083092e3aed6..e457c7ab2e28 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -830,8 +830,8 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 	return mss_now;
 }
 
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
-				size_t psize, int flags)
+static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+				size_t size, int flags)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int mss_now, size_goal;
@@ -858,12 +858,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 		goto out_err;
 
-	while (psize > 0) {
+	while (size > 0) {
 		struct sk_buff *skb = tcp_write_queue_tail(sk);
-		struct page *page = pages[poffset / PAGE_SIZE];
 		int copy, i;
-		int offset = poffset % PAGE_SIZE;
-		int size = min_t(size_t, psize, PAGE_SIZE - offset);
 		bool can_coalesce;
 
 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
@@ -912,8 +909,8 @@ new_segment:
 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
 		copied += copy;
-		poffset += copy;
-		if (!(psize -= copy))
+		offset += copy;
+		if (!(size -= copy))
 			goto out;
 
 		if (skb->len < size_goal || (flags & MSG_OOB))
@@ -960,7 +957,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 			    flags);
 
 	lock_sock(sk);
-	res = do_tcp_sendpages(sk, &page, offset, size, flags);
+	res = do_tcp_sendpages(sk, page, offset, size, flags);
 	release_sock(sk);
 	return res;
 }