Diffstat (limited to 'net/ipv4/tcp_input.c'):

 net/ipv4/tcp_input.c | 97 ++++++++++++-------------------
 1 file changed, 34 insertions(+), 63 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 26c936930e92..cad73b7dfef0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1392,9 +1392,9 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 
 	if (before(next_dup->start_seq, skip_to_seq)) {
 		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
-		tcp_sacktag_walk(skb, sk, NULL,
+		skb = tcp_sacktag_walk(skb, sk, NULL,
 				 next_dup->start_seq, next_dup->end_seq,
 				 1, fack_count, reord, flag);
 	}
 
 	return skb;
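The hunk above fixes a dropped return value: tcp_sacktag_walk() advances the traversal and hands back the new position, but the old code threw that position away, so the caller carried on from a stale skb. A minimal user-space sketch of the pattern (illustrative names, not the kernel's types):

#include <stdio.h>

struct node {
	int seq;
	struct node *next;
};

/* Advances the cursor past all nodes below 'end' and returns it. */
static struct node *walk(struct node *cur, int end)
{
	while (cur && cur->seq < end)
		cur = cur->next;
	return cur;
}

int main(void)
{
	struct node c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
	struct node *skb = &a;

	walk(skb, 25);       /* buggy: advanced cursor discarded, skb stays at 10 */
	skb = walk(skb, 25); /* fixed: skb actually moves on to seq 30 */
	printf("cursor at seq %d\n", skb->seq);
	return 0;
}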
@@ -1842,9 +1842,16 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
 
-		/* Don't lost mark skbs that were fwd transmitted after RTO */
-		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
-		    !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+		/* Marking forward transmissions that were made after RTO lost
+		 * can cause unnecessary retransmissions in some scenarios,
+		 * SACK blocks will mitigate that in some but not in all cases.
+		 * We used to not mark them but it was causing break-ups with
+		 * receivers that do only in-order receival.
+		 *
+		 * TODO: we could detect presence of such receiver and select
+		 * different behavior per flow.
+		 */
+		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
 		}
@@ -1860,7 +1867,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
-	tp->high_seq = tp->frto_highmark;
+	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 
 	tcp_clear_retrans_hints_partial(tp);
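The two tcp_enter_frto_loss() hunks change which segments get the LOST mark when F-RTO falls back to conventional loss recovery: previously, segments forward-transmitted past frto_highmark were skipped; now every segment not known to be SACKed is marked, and high_seq is taken from snd_nxt to match. A compilable sketch of the old versus new predicate, with a stand-in flag and a wrap-safe compare (simplified types, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SACKED_ACKED 0x01 /* stand-in for TCPCB_SACKED_ACKED */

/* wrap-safe "a is after b", mirroring the kernel's after() */
static bool seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* old rule: skip segments forward-transmitted beyond frto_highmark */
static bool mark_lost_old(uint8_t sacked, uint32_t end_seq, uint32_t highmark)
{
	return !(sacked & SACKED_ACKED) && !seq_after(end_seq, highmark);
}

/* new rule: everything not known to be SACKed is marked lost */
static bool mark_lost_new(uint8_t sacked)
{
	return !(sacked & SACKED_ACKED);
}

int main(void)
{
	/* a segment sent after the RTO, i.e. end_seq beyond the highmark: */
	printf("old: %d, new: %d\n",
	       mark_lost_old(0, 2000, 1000), mark_lost_new(0));
	return 0;
}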
@@ -2476,28 +2483,34 @@ static inline void tcp_complete_cwr(struct sock *sk)
 		tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+static void tcp_try_keep_open(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int state = TCP_CA_Open;
+
+	if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+		state = TCP_CA_Disorder;
+
+	if (inet_csk(sk)->icsk_ca_state != state) {
+		tcp_set_ca_state(sk, state);
+		tp->high_seq = tp->snd_nxt;
+	}
+}
+
 static void tcp_try_to_open(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tcp_verify_left_out(tp);
 
-	if (tp->retrans_out == 0)
+	if (!tp->frto_counter && tp->retrans_out == 0)
 		tp->retrans_stamp = 0;
 
 	if (flag & FLAG_ECE)
 		tcp_enter_cwr(sk, 1);
 
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
-		int state = TCP_CA_Open;
-
-		if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
-			state = TCP_CA_Disorder;
-
-		if (inet_csk(sk)->icsk_ca_state != state) {
-			tcp_set_ca_state(sk, state);
-			tp->high_seq = tp->snd_nxt;
-		}
+		tcp_try_keep_open(sk);
 		tcp_moderate_cwnd(tp);
 	} else {
 		tcp_cwnd_down(sk, flag);
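tcp_try_keep_open() is a straight extraction of the Open/Disorder choice that used to live inline in tcp_try_to_open(), making it callable from the old_ack path in the next hunk; as a side change, tcp_try_to_open() now also leaves retrans_stamp alone while frto_counter is nonzero. The extracted decision, reduced to a standalone sketch (stand-in ints for the kernel fields):

#include <stdio.h>

enum ca_state { CA_OPEN, CA_DISORDER };

/* inputs correspond to tcp_left_out(tp), tp->retrans_out, tp->undo_marker */
static enum ca_state keep_open_state(int left_out, int retrans_out,
				     int undo_marker)
{
	return (left_out || retrans_out || undo_marker) ? CA_DISORDER : CA_OPEN;
}

int main(void)
{
	printf("nothing outstanding -> %s\n",
	       keep_open_state(0, 0, 0) == CA_OPEN ? "Open" : "Disorder");
	printf("sacked/lost data    -> %s\n",
	       keep_open_state(1, 0, 0) == CA_OPEN ? "Open" : "Disorder");
	return 0;
}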
@@ -3303,8 +3316,11 @@ no_queue:
 	return 1;
 
 old_ack:
-	if (TCP_SKB_CB(skb)->sacked)
+	if (TCP_SKB_CB(skb)->sacked) {
 		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+		if (icsk->icsk_ca_state == TCP_CA_Open)
+			tcp_try_keep_open(sk);
+	}
 
 uninteresting_ack:
 	SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
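This hunk matters because an ACK below snd_una is stale for cumulative purposes but its SACK blocks can still carry new information; after tagging the write queue, the code now re-checks via tcp_try_keep_open() whether TCP_CA_Open is still the right state. For reference, the kernel classifies an ACK as old with a wrap-safe sequence compare; a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a is before b", matching the kernel's before() */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t prior_snd_una = 1000;

	/* an ACK for 900 takes the old_ack path, even across wrap-around */
	printf("ack 900 old? %d\n", seq_before(900, prior_snd_una));
	printf("ack 0xfffffff0 old (una 16)? %d\n", seq_before(0xfffffff0u, 16));
	return 0;
}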
@@ -4525,49 +4541,6 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
 	}
 }
 
-static int tcp_defer_accept_check(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (tp->defer_tcp_accept.request) {
-		int queued_data = tp->rcv_nxt - tp->copied_seq;
-		int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ?
-			     tcp_hdr((struct sk_buff *)
-				     sk->sk_receive_queue.prev)->fin : 0;
-
-		if (queued_data && hasfin)
-			queued_data--;
-
-		if (queued_data &&
-		    tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
-			if (sock_flag(sk, SOCK_KEEPOPEN)) {
-				inet_csk_reset_keepalive_timer(sk,
-						keepalive_time_when(tp));
-			} else {
-				inet_csk_delete_keepalive_timer(sk);
-			}
-
-			inet_csk_reqsk_queue_add(
-					tp->defer_tcp_accept.listen_sk,
-					tp->defer_tcp_accept.request,
-					sk);
-
-			tp->defer_tcp_accept.listen_sk->sk_data_ready(
-					tp->defer_tcp_accept.listen_sk, 0);
-
-			sock_put(tp->defer_tcp_accept.listen_sk);
-			sock_put(sk);
-			tp->defer_tcp_accept.listen_sk = NULL;
-			tp->defer_tcp_accept.request = NULL;
-		} else if (hasfin ||
-			   tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
-			tcp_reset(sk);
-			return -1;
-		}
-	}
-	return 0;
-}
-
 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
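The removed tcp_defer_accept_check() implemented TCP_DEFER_ACCEPT on the established socket; with this diff (and the call-site removal in the next hunk) the option reverts to being handled on the listener/request side. The user-space interface is unchanged: the option is set on the listening socket with a timeout in seconds. A minimal sketch, with a hypothetical helper name and error handling elided:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int make_deferred_listener(unsigned short port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int secs = 5; /* don't wake accept() until data arrives, within ~5s */
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);
	return fd;
}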
@@ -4928,8 +4901,6 @@ step5:
 
 	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
-
-	tcp_defer_accept_check(sk);
 	return 0;
 
 csum_error: