Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	99
1 file changed, 58 insertions(+), 41 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ca9590f4f520..20c9440ab85e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1400,11 +1400,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			/* DSACK info lost if out-of-mem, try SACK still */
 			if (in_sack <= 0)
 				in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
-			if (in_sack < 0)
+			if (unlikely(in_sack < 0))
 				break;
 
-			fack_count += tcp_skb_pcount(skb);
-
 			sacked = TCP_SKB_CB(skb)->sacked;
 
 			/* Account D-SACK for retransmitted packet. */
@@ -1419,19 +1417,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 					if ((dup_sack && in_sack) &&
 					    (sacked&TCPCB_SACKED_ACKED))
 						reord = min(fack_count, reord);
-				} else {
-					/* If it was in a hole, we detected reordering. */
-					if (fack_count < prior_fackets &&
-					    !(sacked&TCPCB_SACKED_ACKED))
-						reord = min(fack_count, reord);
 				}
 
 				/* Nothing to do; acked frame is about to be dropped. */
+				fack_count += tcp_skb_pcount(skb);
 				continue;
 			}
 
-			if (!in_sack)
+			if (!in_sack) {
+				fack_count += tcp_skb_pcount(skb);
 				continue;
+			}
 
 			if (!(sacked&TCPCB_SACKED_ACKED)) {
 				if (sacked & TCPCB_SACKED_RETRANS) {
@@ -1448,12 +1444,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 						tp->retransmit_skb_hint = NULL;
 					}
 				} else {
-					/* New sack for not retransmitted frame,
-					 * which was in hole. It is reordering.
-					 */
-					if (!(sacked & TCPCB_RETRANS) &&
-					    fack_count < prior_fackets)
-						reord = min(fack_count, reord);
+					if (!(sacked & TCPCB_RETRANS)) {
+						/* New sack for not retransmitted frame,
+						 * which was in hole. It is reordering.
+						 */
+						if (fack_count < prior_fackets)
+							reord = min(fack_count, reord);
+
+						/* SACK enhanced F-RTO (RFC4138; Appendix B) */
+						if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+							flag |= FLAG_ONLY_ORIG_SACKED;
+					}
 
 					if (sacked & TCPCB_LOST) {
 						TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -1462,24 +1463,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 						/* clear lost hint */
 						tp->retransmit_skb_hint = NULL;
 					}
-					/* SACK enhanced F-RTO detection.
-					 * Set flag if and only if non-rexmitted
-					 * segments below frto_highmark are
-					 * SACKed (RFC4138; Appendix B).
-					 * Clearing correct due to in-order walk
-					 */
-					if (after(end_seq, tp->frto_highmark)) {
-						flag &= ~FLAG_ONLY_ORIG_SACKED;
-					} else {
-						if (!(sacked & TCPCB_RETRANS))
-							flag |= FLAG_ONLY_ORIG_SACKED;
-					}
 				}
 
 				TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
 				flag |= FLAG_DATA_SACKED;
 				tp->sacked_out += tcp_skb_pcount(skb);
 
+				fack_count += tcp_skb_pcount(skb);
 				if (fack_count > tp->fackets_out)
 					tp->fackets_out = fack_count;
 
@@ -1490,6 +1480,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			} else {
 				if (dup_sack && (sacked&TCPCB_RETRANS))
 					reord = min(fack_count, reord);
+
+				fack_count += tcp_skb_pcount(skb);
 			}
 
 			/* D-SACK. We can detect redundant retransmission
@@ -1504,6 +1496,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 				tp->retransmit_skb_hint = NULL;
 			}
 		}
+
+		/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
+		 * due to in-order walk
+		 */
+		if (after(end_seq, tp->frto_highmark))
+			flag &= ~FLAG_ONLY_ORIG_SACKED;
 	}
 
 	if (tp->retrans_out &&
@@ -1515,7 +1513,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
 	if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
 	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
-		tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
+		tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
 #if FASTRETRANS_DEBUG > 0
 	BUG_TRAP((int)tp->sacked_out >= 0);
@@ -2630,7 +2628,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * is before the ack sequence we can discard it as it's confirmed to have
  * arrived at the other end.
  */
-static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
+static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
+			       int prior_fackets)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2639,6 +2638,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 	int fully_acked = 1;
 	int flag = 0;
 	int prior_packets = tp->packets_out;
+	u32 cnt = 0;
+	u32 reord = tp->packets_out;
 	s32 seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
 
@@ -2679,10 +2680,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 				if ((flag & FLAG_DATA_ACKED) ||
 				    (packets_acked > 1))
 					flag |= FLAG_NONHEAD_RETRANS_ACKED;
-			} else if (seq_rtt < 0) {
-				seq_rtt = now - scb->when;
-				if (fully_acked)
-					last_ackt = skb->tstamp;
+			} else {
+				if (seq_rtt < 0) {
+					seq_rtt = now - scb->when;
+					if (fully_acked)
+						last_ackt = skb->tstamp;
+				}
+				if (!(sacked & TCPCB_SACKED_ACKED))
+					reord = min(cnt, reord);
 			}
 
 			if (sacked & TCPCB_SACKED_ACKED)
@@ -2693,12 +2698,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 			if ((sacked & TCPCB_URG) && tp->urg_mode &&
 			    !before(end_seq, tp->snd_up))
 				tp->urg_mode = 0;
-		} else if (seq_rtt < 0) {
-			seq_rtt = now - scb->when;
-			if (fully_acked)
-				last_ackt = skb->tstamp;
+		} else {
+			if (seq_rtt < 0) {
+				seq_rtt = now - scb->when;
+				if (fully_acked)
+					last_ackt = skb->tstamp;
+			}
+			reord = min(cnt, reord);
 		}
 		tp->packets_out -= packets_acked;
+		cnt += packets_acked;
 
 		/* Initial outgoing SYN's get put onto the write_queue
 		 * just like anything else we transmit. It is not
@@ -2730,13 +2739,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, flag, seq_rtt);
 		tcp_rearm_rto(sk);
 
+		if (tcp_is_reno(tp)) {
+			tcp_remove_reno_sacks(sk, pkts_acked);
+		} else {
+			/* Non-retransmitted hole got filled? That's reordering */
+			if (reord < prior_fackets)
+				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+		}
+
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 		/* hint's skb might be NULL but we don't need to care */
 		tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
 					       tp->fastpath_cnt_hint);
-		if (tcp_is_reno(tp))
-			tcp_remove_reno_sacks(sk, pkts_acked);
-
 		if (ca_ops->pkts_acked) {
 			s32 rtt_us = -1;
 
@@ -3019,6 +3033,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	u32 prior_in_flight;
+	u32 prior_fackets;
 	s32 seq_rtt;
 	int prior_packets;
 	int frto_cwnd = 0;
@@ -3043,6 +3058,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 			tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
 	}
 
+	prior_fackets = tp->fackets_out;
+
 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.
 		 * No more checks are required.
@@ -3084,7 +3101,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	prior_in_flight = tcp_packets_in_flight(tp);
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
+	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
 	/* Guarantee sacktag reordering detection against wrap-arounds */
 	if (before(tp->frto_highmark, tp->snd_una))