commit     6475be16fd9b3c6746ca4d18959246b13c669ea8
tree       03e0da36680ddb227591a4007fa4e6f18d82782c
parent     ef015786152adaff5a6a8bf0c8ea2f70cee8059d
author     David S. Miller <davem@davemloft.net>  2005-09-02 01:47:01 -0400
committer  David S. Miller <davem@davemloft.net>  2005-09-02 01:47:01 -0400
[TCP]: Keep TSO enabled even during loss events.
All we need to do is resegment the queue so that we record SACK
information accurately.  The edges of the SACK blocks guide our
resegmenting decisions.

With help from Herbert Xu.

Signed-off-by: David S. Miller <davem@davemloft.net>
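For illustration, a minimal userspace sketch of the edge-guided split
decision follows. This is not kernel code: seq_after()/seq_before()
stand in for the kernel's wrap-safe after()/before() comparisons, and
sack_split_len() is a hypothetical helper mirroring the logic this
patch adds to tcp_sacktag_write_queue().

    #include <stdint.h>

    /* Wrap-safe sequence comparisons, like the kernel's after()/before(). */
    static int seq_after(uint32_t a, uint32_t b)  { return (int32_t)(a - b) > 0; }
    static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

    /* Given a TSO packet covering [seq, end_seq) and a SACK block
     * [sack_start, sack_end), return the length at which to split the
     * packet so the SACK edge lands on a packet boundary, or 0 if the
     * block covers the packet entirely and no split is needed.
     */
    static uint32_t sack_split_len(uint32_t seq, uint32_t end_seq,
                                   uint32_t sack_start, uint32_t sack_end)
    {
            if (seq_after(sack_start, seq))
                    return sack_start - seq;  /* block starts mid-packet */
            if (seq_before(sack_end, end_seq))
                    return sack_end - seq;    /* block ends mid-packet */
            return 0;
    }

    int main(void)
    {
            /* Hypothetical example: packet covers [1000, 4000); SACK block
             * [1000, 2500) ends mid-packet, so split after 1500 bytes.
             */
            return sack_split_len(1000, 4000, 1000, 2500) == 1500 ? 0 : 1;
    }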
Diffstat (limited to 'net')

 net/ipv4/tcp_input.c  | 36
 net/ipv4/tcp_output.c | 55
 2 files changed, 44 insertions(+), 47 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1afb080bdf0c..29222b964951 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -923,14 +923,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
         int flag = 0;
         int i;
 
-        /* So, SACKs for already sent large segments will be lost.
-         * Not good, but alternative is to resegment the queue. */
-        if (sk->sk_route_caps & NETIF_F_TSO) {
-                sk->sk_route_caps &= ~NETIF_F_TSO;
-                sock_set_flag(sk, SOCK_NO_LARGESEND);
-                tp->mss_cache = tp->mss_cache;
-        }
-
         if (!tp->sacked_out)
                 tp->fackets_out = 0;
         prior_fackets = tp->fackets_out;
@@ -978,20 +970,40 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                         flag |= FLAG_DATA_LOST;
 
                 sk_stream_for_retrans_queue(skb, sk) {
-                        u8 sacked = TCP_SKB_CB(skb)->sacked;
-                        int in_sack;
+                        int in_sack, pcount;
+                        u8 sacked;
 
                         /* The retransmission queue is always in order, so
                          * we can short-circuit the walk early.
                          */
-                        if(!before(TCP_SKB_CB(skb)->seq, end_seq))
+                        if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                                 break;
 
-                        fack_count += tcp_skb_pcount(skb);
+                        pcount = tcp_skb_pcount(skb);
+
+                        if (pcount > 1 &&
+                            (after(start_seq, TCP_SKB_CB(skb)->seq) ||
+                             before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
+                                unsigned int pkt_len;
+
+                                if (after(start_seq, TCP_SKB_CB(skb)->seq))
+                                        pkt_len = (start_seq -
+                                                   TCP_SKB_CB(skb)->seq);
+                                else
+                                        pkt_len = (end_seq -
+                                                   TCP_SKB_CB(skb)->seq);
+                                if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+                                        break;
+                                pcount = tcp_skb_pcount(skb);
+                        }
+
+                        fack_count += pcount;
 
                         in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
                                 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
+                        sacked = TCP_SKB_CB(skb)->sacked;
+
                         /* Account D-SACK for retransmitted packet. */
                         if ((dup_sack && in_sack) &&
                             (sacked & TCPCB_RETRANS) &&
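Why pcount is re-read after tcp_fragment() above: a SACK edge that is
not segment aligned can raise the total segment count, so fack_count
must use the refreshed value. A worked example with invented numbers
(tcp_skb_pcount() is roughly ceil(len / mss) once
tcp_set_skb_tso_segs() has run):

    #include <stdio.h>

    /* Approximation of tcp_skb_pcount() for a TSO packet. */
    static unsigned int pcount(unsigned int len, unsigned int mss)
    {
            return (len + mss - 1) / mss;
    }

    int main(void)
    {
            unsigned int mss = 1000;
            unsigned int len = 3000;     /* one 3-segment TSO packet    */
            unsigned int pkt_len = 1500; /* SACK edge lands mid-segment */

            printf("before split: %u segments\n", pcount(len, mss));      /* 3 */
            printf("after split:  %u + %u segments\n",
                   pcount(pkt_len, mss), pcount(len - pkt_len, mss));     /* 2 + 2 */
            return 0;
    }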
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 75b68116682a..6094db5e11be 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -428,11 +428,11 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
  * packet to the list.  This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
+int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *buff;
-        int nsize;
+        int nsize, old_factor;
         u16 flags;
 
         nsize = skb_headlen(skb) - len;
@@ -490,18 +490,29 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
                 tp->left_out -= tcp_skb_pcount(skb);
         }
 
+        old_factor = tcp_skb_pcount(skb);
+
         /* Fix up tso_factor for both original and new SKB.  */
         tcp_set_skb_tso_segs(sk, skb, mss_now);
         tcp_set_skb_tso_segs(sk, buff, mss_now);
 
-        if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
-                tp->lost_out += tcp_skb_pcount(skb);
-                tp->left_out += tcp_skb_pcount(skb);
-        }
-
-        if (TCP_SKB_CB(buff)->sacked&TCPCB_LOST) {
-                tp->lost_out += tcp_skb_pcount(buff);
-                tp->left_out += tcp_skb_pcount(buff);
+        /* If this packet has been sent out already, we must
+         * adjust the various packet counters.
+         */
+        if (after(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
+                int diff = old_factor - tcp_skb_pcount(skb) -
+                        tcp_skb_pcount(buff);
+
+                tp->packets_out -= diff;
+                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+                        tp->lost_out -= diff;
+                        tp->left_out -= diff;
+                }
+                if (diff > 0) {
+                        tp->fackets_out -= diff;
+                        if ((int)tp->fackets_out < 0)
+                                tp->fackets_out = 0;
+                }
         }
 
         /* Link BUFF into the send queue. */
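The counter fix-up above runs only when the split packet was already
on the wire (tp->snd_nxt is past buff->end_seq). A standalone sketch
of the arithmetic, using a hypothetical counters struct rather than
the real struct tcp_sock:

    #include <stdio.h>

    /* Hypothetical stand-in for the tcp_sock counters this patch touches. */
    struct counters {
            int packets_out;
            int lost_out;
            int left_out;
            int fackets_out;
    };

    /* Mirrors the accounting block added to tcp_fragment(). */
    static void recount(struct counters *tp, int old_factor,
                        int pcount_skb, int pcount_buff, int was_lost)
    {
            int diff = old_factor - pcount_skb - pcount_buff;

            tp->packets_out -= diff;        /* diff < 0 grows packets_out */
            if (was_lost) {
                    tp->lost_out -= diff;
                    tp->left_out -= diff;
            }
            if (diff > 0) {                 /* clamp fackets_out at zero */
                    tp->fackets_out -= diff;
                    if (tp->fackets_out < 0)
                            tp->fackets_out = 0;
            }
    }

    int main(void)
    {
            struct counters tp = { .packets_out = 10, .fackets_out = 4 };

            /* 3-segment packet split off a segment boundary into 2 + 2:
             * diff = 3 - 2 - 2 = -1, so packets_out grows by one.
             */
            recount(&tp, 3, 2, 2, 0);
            printf("packets_out = %d\n", tp.packets_out);  /* 11 */
            return 0;
    }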
@@ -1350,12 +1361,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
                 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
                         BUG();
-
-                if (sk->sk_route_caps & NETIF_F_TSO) {
-                        sk->sk_route_caps &= ~NETIF_F_TSO;
-                        sock_set_flag(sk, SOCK_NO_LARGESEND);
-                }
-
                 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
                         return -ENOMEM;
         }
@@ -1370,22 +1375,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                 return -EAGAIN;
 
         if (skb->len > cur_mss) {
-                int old_factor = tcp_skb_pcount(skb);
-                int diff;
-
                 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
                         return -ENOMEM; /* We'll try again later. */
-
-                /* New SKB created, account for it. */
-                diff = old_factor - tcp_skb_pcount(skb) -
-                       tcp_skb_pcount(skb->next);
-                tp->packets_out -= diff;
-
-                if (diff > 0) {
-                        tp->fackets_out -= diff;
-                        if ((int)tp->fackets_out < 0)
-                                tp->fackets_out = 0;
-                }
         }
 
         /* Collapse two adjacent packets if worthwhile and we can. */
@@ -1993,12 +1984,6 @@ int tcp_write_wakeup(struct sock *sk)
                         TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
                         if (tcp_fragment(sk, skb, seg_size, mss))
                                 return -1;
-                        /* SWS override triggered forced fragmentation.
-                         * Disable TSO, the connection is too sick. */
-                        if (sk->sk_route_caps & NETIF_F_TSO) {
-                                sock_set_flag(sk, SOCK_NO_LARGESEND);
-                                sk->sk_route_caps &= ~NETIF_F_TSO;
-                        }
                 } else if (!tcp_skb_pcount(skb))
                         tcp_set_skb_tso_segs(sk, skb, mss);
 