author     Eric Dumazet <edumazet@google.com>      2018-11-27 17:42:01 -0500
committer  David S. Miller <davem@davemloft.net>   2018-11-30 16:26:53 -0500
commit     19119f298bb1f2af3bb1093f5f2a1fed8da94e37 (patch)
tree       40fc0b12fcae58b8627c8e2913ae7a758fe463b6 /net/ipv4/tcp_input.c
parent     ebeef4bcccf4681ba3de04fbf3a005f6054f7999 (diff)
tcp: take care of compressed acks in tcp_add_reno_sack()
Neal pointed out that non-SACK flows might suffer from the ACK compression added in the following patch ("tcp: implement coalescing on backlog queue").

Instead of tweaking tcp_add_backlog() we can take into account how many ACKs were coalesced; this information will be available in skb_shinfo(skb)->gso_segs.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
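[Editor's note] For readers following along outside the kernel tree, here is a minimal userspace sketch of the counting rule this patch adds to tcp_ack(). It is illustrative only: struct fake_skb and num_dupacks() are stand-ins, not kernel APIs, and the premise that coalesced pure ACKs leave their count in gso_segs comes from the companion backlog-coalescing patch.

    /* Illustrative sketch, NOT kernel code: fake_skb stands in for a
     * struct sk_buff whose gso_segs field (per the companion patch,
     * assumed here) counts how many pure ACKs were coalesced. */
    #include <stdio.h>

    struct fake_skb {
    	int has_data;   /* did the segment carry payload? */
    	int gso_segs;   /* ACKs merged by the assumed backlog coalescing */
    };

    static int num_dupacks(const struct fake_skb *skb)
    {
    	/* A data-bearing segment counts as one dupack source; a pure
    	 * ACK may represent several coalesced ACKs (at least one). */
    	if (skb->has_data || skb->gso_segs < 1)
    		return 1;
    	return skb->gso_segs;
    }

    int main(void)
    {
    	struct fake_skb pure = { .has_data = 0, .gso_segs = 3 };
    	struct fake_skb data = { .has_data = 1, .gso_segs = 3 };

    	printf("pure ack -> %d dupacks\n", num_dupacks(&pure)); /* 3 */
    	printf("data seg -> %d dupack\n", num_dupacks(&data));  /* 1 */
    	return 0;
    }

The pure-ACK case reports 3 dupacks while the data-bearing case still counts as 1, mirroring the max_t(u16, 1, skb_shinfo(skb)->gso_segs) logic in the tcp_ack() hunk below.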
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 58
1 file changed, 33 insertions(+), 25 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 78752746a6e2..76858b14ebe9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1865,16 +1865,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	u32 prior_sacked = tp->sacked_out;
+	if (num_dupack) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		u32 prior_sacked = tp->sacked_out;
+		s32 delivered;
 
-	tp->sacked_out++;
-	tcp_check_reno_reordering(sk, 0);
-	if (tp->sacked_out > prior_sacked)
-		tp->delivered++; /* Some out-of-order packet is delivered */
-	tcp_verify_left_out(tp);
+		tp->sacked_out += num_dupack;
+		tcp_check_reno_reordering(sk, 0);
+		delivered = tp->sacked_out - prior_sacked;
+		if (delivered > 0)
+			tp->delivered += delivered;
+		tcp_verify_left_out(tp);
+	}
 }
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
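[Editor's note] The hunk above is the core semantic change: tcp_add_reno_sack() now credits a whole batch of dupacks at once, recomputing delivered from the sacked_out delta because tcp_check_reno_reordering() may clamp sacked_out. A small userspace model (illustrative only; model_tp and the explicit limit parameter are stand-ins for the kernel's state and reordering check) shows that one batched call accounts the same total as repeated single calls:

    /* Illustrative model, NOT kernel code: limit stands in for the
     * clamping that tcp_check_reno_reordering() may apply. */
    #include <assert.h>

    struct model_tp {
    	unsigned int sacked_out;
    	unsigned int delivered;
    };

    static void add_reno_sack(struct model_tp *tp, int num_dupack,
    			  unsigned int limit)
    {
    	if (num_dupack) {
    		unsigned int prior_sacked = tp->sacked_out;

    		tp->sacked_out += num_dupack;
    		if (tp->sacked_out > limit)        /* reordering clamp */
    			tp->sacked_out = limit;
    		if (tp->sacked_out > prior_sacked) /* delivered > 0 */
    			tp->delivered += tp->sacked_out - prior_sacked;
    	}
    }

    int main(void)
    {
    	struct model_tp batched = { 0, 0 };
    	struct model_tp looped = { 0, 0 };
    	int i;

    	add_reno_sack(&batched, 3, 100);   /* one batched call */
    	for (i = 0; i < 3; i++)            /* vs. three single calls */
    		add_reno_sack(&looped, 1, 100);
    	assert(batched.delivered == looped.delivered);
    	return 0;
    }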
@@ -2636,7 +2640,7 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
  * recovered or spurious. Otherwise retransmits more on partial ACKs.
  */
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 			     int *rexmit)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2655,7 +2659,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
 		return;
 
 	if (after(tp->snd_nxt, tp->high_seq)) {
-		if (flag & FLAG_DATA_SACKED || is_dupack)
+		if (flag & FLAG_DATA_SACKED || num_dupack)
 			tp->frto = 0; /* Step 3.a. loss was real */
 	} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
 		tp->high_seq = tp->snd_nxt;
@@ -2681,8 +2685,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
 		/* A Reno DUPACK means new data in F-RTO step 2.b above are
 		 * delivered. Lower inflight to clock out (re)tranmissions.
 		 */
-		if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
-			tcp_add_reno_sack(sk);
+		if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
+			tcp_add_reno_sack(sk, num_dupack);
 		else if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
 	}
@@ -2759,13 +2763,13 @@ static bool tcp_force_fast_retransmit(struct sock *sk)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
-				  bool is_dupack, int *ack_flag, int *rexmit)
+				  int num_dupack, int *ack_flag, int *rexmit)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
-	bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
-				     tcp_force_fast_retransmit(sk));
+	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
+				      tcp_force_fast_retransmit(sk));
 
 	if (!tp->packets_out && tp->sacked_out)
 		tp->sacked_out = 0;
@@ -2812,8 +2816,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
-			if (tcp_is_reno(tp) && is_dupack)
-				tcp_add_reno_sack(sk);
+			if (tcp_is_reno(tp))
+				tcp_add_reno_sack(sk, num_dupack);
 		} else {
 			if (tcp_try_undo_partial(sk, prior_snd_una))
 				return;
@@ -2828,7 +2832,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		tcp_identify_packet_loss(sk, ack_flag);
 		break;
 	case TCP_CA_Loss:
-		tcp_process_loss(sk, flag, is_dupack, rexmit);
+		tcp_process_loss(sk, flag, num_dupack, rexmit);
 		tcp_identify_packet_loss(sk, ack_flag);
 		if (!(icsk->icsk_ca_state == TCP_CA_Open ||
 		      (*ack_flag & FLAG_LOST_RETRANS)))
@@ -2839,8 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	if (tcp_is_reno(tp)) {
 		if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
-		if (is_dupack)
-			tcp_add_reno_sack(sk);
+		tcp_add_reno_sack(sk, num_dupack);
 	}
 
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -3562,7 +3565,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	bool is_sack_reneg = tp->is_sack_reneg;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
-	bool is_dupack = false;
+	int num_dupack = 0;
 	int prior_packets = tp->packets_out;
 	u32 delivered = tp->delivered;
 	u32 lost = tp->lost;
@@ -3673,8 +3676,13 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		tcp_set_xmit_timer(sk);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
-		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+		if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+			num_dupack = 1;
+			/* Consider if pure acks were aggregated in tcp_add_backlog() */
+			if (!(flag & FLAG_DATA))
+				num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+		}
+		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
 				      &rexmit);
 	}
 
@@ -3692,7 +3700,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK) {
-		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
 				      &rexmit);
 		tcp_newly_delivered(sk, delivered, flag);
 	}
@@ -3717,7 +3725,7 @@ old_ack:
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
 						&sack_state);
-		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
 				      &rexmit);
 		tcp_newly_delivered(sk, delivered, flag);
 		tcp_xmit_recovery(sk, rexmit);