author	David S. Miller <davem@sunset.davemloft.net>	2005-04-24 22:12:33 -0400
committer	David S. Miller <davem@davemloft.net>	2005-04-24 22:12:33 -0400
commit	d5ac99a648b8c61d0c7f1c32a8ab7f1dca0123d2 (patch)
tree	3811d84e83dab918c57aeca1081ae343cb97df8b /include/net/tcp.h
parent	158a0e45b69254a9ee4d7795e3b98d8c959fb799 (diff)
[TCP]: skb pcount with MTU discovery
The problem is that when doing MTU discovery, the too-large segments in
the write queue will be calculated as having a pcount of >1. When
tcp_write_xmit() is trying to send, tcp_snd_test() fails the cwnd test
when pcount > cwnd.

The segments are eventually transmitted one at a time by keepalive, but
this can take a long time.

This patch checks if TSO is enabled when setting pcount.
Signed-off-by: John Heffner <jheffner@psc.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
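
As a minimal standalone illustration of the stall described above (this is
a sketch, not kernel code; struct seg, cwnd_test() and the concrete numbers
are invented for the example):

/* Sketch of the cwnd test shape in tcp_snd_test(): the whole segment
 * must fit inside the remaining congestion window. When MTU discovery
 * shrinks the MSS, a segment queued for the old MSS gets a pcount
 * larger than cwnd, so the test below never passes and the segment
 * sits in the write queue. */
#include <stdio.h>

struct seg {
	unsigned int len;	/* segment payload length in bytes */
	unsigned int pcount;	/* how many MSS-sized packets it counts as */
};

static int cwnd_test(unsigned int in_flight, unsigned int cwnd,
		     const struct seg *s)
{
	return in_flight + s->pcount <= cwnd;
}

int main(void)
{
	unsigned int mss = 536;	/* shrunk by MTU discovery */
	unsigned int cwnd = 2;	/* small congestion window */
	/* A segment built for a larger MSS: at the new MSS it is
	 * accounted as ceil(4380 / 536) = 9 packets. */
	struct seg too_large = { .len = 4380,
				 .pcount = (4380 + mss - 1) / mss };

	printf("pcount=%u cwnd=%u -> may send: %s\n",
	       too_large.pcount, cwnd,
	       cwnd_test(0, cwnd, &too_large) ? "yes" : "no (stalled)");
	return 0;
}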
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 503810a70e21..9355ae5b1d75 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1417,19 +1417,20 @@ tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
 				tcp_minshall_check(tp))));
 }
 
-extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int);
+extern void tcp_set_skb_tso_segs(struct sock *, struct sk_buff *);
 
 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
  * should be put on the wire right now.
  */
-static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
+static __inline__ int tcp_snd_test(struct sock *sk,
 				   struct sk_buff *skb,
 				   unsigned cur_mss, int nonagle)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int pkts = tcp_skb_pcount(skb);
 
 	if (!pkts) {
-		tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+		tcp_set_skb_tso_segs(sk, skb);
 		pkts = tcp_skb_pcount(skb);
 	}
 
@@ -1490,7 +1491,7 @@ static __inline__ void __tcp_push_pending_frames(struct sock *sk,
 	if (skb) {
 		if (!tcp_skb_is_last(sk, skb))
 			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
+		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
 		    tcp_write_xmit(sk, nonagle))
 			tcp_check_probe_timer(sk, tp);
 	}
@@ -1508,7 +1509,7 @@ static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
 	struct sk_buff *skb = sk->sk_send_head;
 
 	return (skb &&
-		tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
+		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
 			tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
 }
 
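The tcp_output.c side of the patch falls outside this file's diff; a hedged
sketch of the pcount policy implied by the commit message (set_pcount() and
the tso_enabled flag are illustrative names, not kernel APIs):

/* Sketch only: when TSO is not in use, count an oversized segment as a
 * single packet (it will be cut down to MSS before transmission) rather
 * than ceil(len / mss), which is what made pcount exceed cwnd. */
#include <stdio.h>

struct seg {
	unsigned int len;
	unsigned int pcount;
};

static void set_pcount(struct seg *s, unsigned int mss, int tso_enabled)
{
	if (!tso_enabled || s->len <= mss)
		s->pcount = 1;	/* one packet: no TSO, or it already fits */
	else
		s->pcount = (s->len + mss - 1) / mss;	/* ceil(len / mss) */
}

int main(void)
{
	struct seg s = { .len = 4380, .pcount = 0 };

	set_pcount(&s, 536, 0);
	printf("TSO off: pcount=%u\n", s.pcount);	/* 1: passes the cwnd test */
	set_pcount(&s, 536, 1);
	printf("TSO on:  pcount=%u\n", s.pcount);	/* 9 */
	return 0;
}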