author     Herbert Xu <herbert@gondor.apana.org.au>  2005-08-04 22:52:02 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2005-08-05 00:43:14 -0400
commit     b68e9f857271189bd7a59b74c99890de9195b0e1 (patch)
tree       92ef0899ca9ff07f64458febcb7ab8095e891042 /net
parent     846998ae87a80b0fd45b4cf5cf001a159d746f27 (diff)
[PATCH] tcp: fix TSO cwnd caching bug
tcp_write_xmit caches the cwnd value indirectly in cwnd_quota.  When
tcp_transmit_skb reduces the cwnd because of tcp_enter_cwr, the cached
value becomes invalid.

This patch ensures that the cwnd value is always reread after each
tcp_transmit_skb call.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
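For illustration only, a minimal userspace sketch of the failure mode: a sender that computes its window quota once cannot notice a mid-loop cwnd reduction, while one that rereads the quota after every transmit stays within the shrunken window. The struct conn, cwnd_quota, and transmit_one names here are hypothetical stand-ins, not kernel code.

#include <stdio.h>

struct conn {
	int cwnd;	/* congestion window, in packets */
	int in_flight;	/* packets sent but not yet acked */
};

/* Stand-in for tcp_cwnd_test(): packets the window still permits. */
static int cwnd_quota(const struct conn *c)
{
	int quota = c->cwnd - c->in_flight;

	return quota > 0 ? quota : 0;
}

/* Stand-in for tcp_transmit_skb(): sending can itself shrink cwnd,
 * the way entering CWR does in the real stack. */
static void transmit_one(struct conn *c, int *sent)
{
	c->in_flight++;
	(*sent)++;
	if (*sent == 2)		/* pretend congestion is signalled here */
		c->cwnd = 3;	/* window reduced mid-loop */
}

int main(void)
{
	struct conn c = { .cwnd = 10, .in_flight = 0 };
	int sent = 0;
	int quota;

	/* Buggy pattern: quota computed once, then trusted. */
	for (quota = cwnd_quota(&c); quota > 0; quota--)
		transmit_one(&c, &sent);
	printf("cached quota: sent %d with cwnd %d (overshoot)\n",
	       sent, c.cwnd);

	/* Fixed pattern: quota reread after every transmit. */
	c = (struct conn){ .cwnd = 10, .in_flight = 0 };
	sent = 0;
	while (cwnd_quota(&c) > 0)
		transmit_one(&c, &sent);
	printf("reread quota: sent %d with cwnd %d\n", sent, c.cwnd);

	return 0;
}

The second loop mirrors the structure of the patched tcp_write_xmit below, which calls tcp_cwnd_test at the top of every iteration instead of decrementing a cached cwnd_quota.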
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_output.c  34
1 file changed, 9 insertions(+), 25 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e118b4b5b326..7d076f0db100 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -972,19 +972,18 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return 0;
 
-	skb = sk->sk_send_head;
-	if (unlikely(!skb))
-		return 0;
-
-	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
-	cwnd_quota = tcp_cwnd_test(tp, skb);
-	if (unlikely(!cwnd_quota))
-		goto out;
-
 	sent_pkts = 0;
-	while (likely(tcp_snd_wnd_test(tp, skb, mss_now))) {
+	while ((skb = sk->sk_send_head)) {
+		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 		BUG_ON(!tso_segs);
 
+		cwnd_quota = tcp_cwnd_test(tp, skb);
+		if (!cwnd_quota)
+			break;
+
+		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+			break;
+
 		if (tso_segs == 1) {
 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
 						     (tcp_skb_is_last(sk, skb) ?
@@ -1026,27 +1025,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
 		tcp_minshall_update(tp, mss_now, skb);
 		sent_pkts++;
-
-		/* Do not optimize this to use tso_segs. If we chopped up
-		 * the packet above, tso_segs will no longer be valid.
-		 */
-		cwnd_quota -= tcp_skb_pcount(skb);
-
-		BUG_ON(cwnd_quota < 0);
-		if (!cwnd_quota)
-			break;
-
-		skb = sk->sk_send_head;
-		if (!skb)
-			break;
-		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 	}
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk, tp);
 		return 0;
 	}
-out:
 	return !tp->packets_out && sk->sk_send_head;
 }
 