author	David S. Miller <davem@sunset.davemloft.net>	2005-04-24 22:12:33 -0400
committer	David S. Miller <davem@davemloft.net>	2005-04-24 22:12:33 -0400
commit	d5ac99a648b8c61d0c7f1c32a8ab7f1dca0123d2 (patch)
tree	3811d84e83dab918c57aeca1081ae343cb97df8b /net/ipv4/tcp_output.c
parent	158a0e45b69254a9ee4d7795e3b98d8c959fb799 (diff)
[TCP]: skb pcount with MTU discovery
The problem is that when doing MTU discovery, the too-large segments in
the write queue will be calculated as having a pcount of > 1.  When
tcp_write_xmit() is trying to send, tcp_snd_test() fails the cwnd test
when pcount > cwnd.

The segments are eventually transmitted one at a time by keepalive, but
this can take a long time.

This patch checks whether TSO is actually enabled on the route when
setting the pcount.

Signed-off-by: John Heffner <jheffner@psc.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
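
To make the stall concrete, here is a minimal user-space C sketch of the
rule the patch establishes.  It is illustrative only, not kernel code:
tso_segs() and cwnd_allows() are hypothetical stand-ins for
tcp_set_skb_tso_segs() and the congestion-window check inside
tcp_snd_test(), and the MSS/cwnd numbers are invented.

/* Illustrative sketch only -- not kernel code.  tso_segs() and
 * cwnd_allows() are hypothetical stand-ins for tcp_set_skb_tso_segs()
 * and the cwnd check in tcp_snd_test(); the numbers are invented. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int tso_segs(unsigned int skb_len, unsigned int mss,
			     bool route_has_tso)
{
	/* Patched rule: only a route that really supports TSO may give
	 * a segment a packet count greater than one. */
	if (skb_len <= mss || !route_has_tso)
		return 1;
	/* TSO case: ceil(skb_len / mss), the "costly divide". */
	return (skb_len + mss - 1) / mss;
}

static bool cwnd_allows(unsigned int pcount, unsigned int cwnd_quota)
{
	/* A segment may go out only if its packet count fits in cwnd. */
	return pcount <= cwnd_quota;
}

int main(void)
{
	/* 4380-byte segment awaiting MTU discovery, 1460-byte MSS,
	 * congestion window with room for 2 more packets. */
	unsigned int tso = tso_segs(4380, 1460, true);     /* pcount = 3 */
	unsigned int no_tso = tso_segs(4380, 1460, false); /* pcount = 1 */

	printf("TSO:    pcount=%u sendable=%d\n", tso,
	       cwnd_allows(tso, 2));    /* 0: genuinely 3 packets, waits */
	printf("no TSO: pcount=%u sendable=%d\n", no_tso,
	       cwnd_allows(no_tso, 2)); /* 1: no longer stuck */
	return 0;
}

With TSO, the over-sized segment legitimately counts as three packets and
must wait for cwnd; without TSO it now counts as one, passes the cwnd
test, and tcp_write_xmit() can fragment it down to mss_now instead of
leaving it stranded until a keepalive probe.  This is also why the patch
threads struct sock * through every caller of tcp_set_skb_tso_segs(): the
old signature had no way to see sk->sk_route_caps.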
Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 13c14cb6dee4..a12df6979ffd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -427,7 +427,7 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = sk->sk_send_head;
 
-	if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
+	if (tcp_snd_test(sk, skb, cur_mss, TCP_NAGLE_PUSH)) {
 		/* Send it out now. */
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		tcp_tso_set_push(skb);
@@ -440,9 +440,12 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	}
 }
 
-void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
+void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
 {
-	if (skb->len <= mss_std) {
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (skb->len <= tp->mss_cache_std ||
+	    !(sk->sk_route_caps & NETIF_F_TSO)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -451,10 +454,10 @@ void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
 	} else {
 		unsigned int factor;
 
-		factor = skb->len + (mss_std - 1);
-		factor /= mss_std;
+		factor = skb->len + (tp->mss_cache_std - 1);
+		factor /= tp->mss_cache_std;
 		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = mss_std;
+		skb_shinfo(skb)->tso_size = tp->mss_cache_std;
 	}
 }
 
@@ -525,8 +528,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
 	}
 
 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
-	tcp_set_skb_tso_segs(buff, tp->mss_cache_std);
+	tcp_set_skb_tso_segs(sk, skb);
+	tcp_set_skb_tso_segs(sk, buff);
 
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
 		tp->lost_out += tcp_skb_pcount(skb);
@@ -601,7 +604,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
+		tcp_set_skb_tso_segs(sk, skb);
 
 	return 0;
 }
@@ -752,7 +755,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
 	mss_now = tcp_current_mss(sk, 1);
 
 	while ((skb = sk->sk_send_head) &&
-	       tcp_snd_test(tp, skb, mss_now,
+	       tcp_snd_test(sk, skb, mss_now,
 			    tcp_skb_is_last(sk, skb) ? nonagle :
 						       TCP_NAGLE_PUSH)) {
 		if (skb->len > mss_now) {
@@ -1676,7 +1679,7 @@ int tcp_write_wakeup(struct sock *sk)
 				tp->mss_cache = tp->mss_cache_std;
 			}
 		} else if (!tcp_skb_pcount(skb))
-			tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+			tcp_set_skb_tso_segs(sk, skb);
 
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;