diff options
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/tcp_input.c  | 53
-rw-r--r-- | net/ipv4/tcp_output.c | 50
2 files changed, 52 insertions(+), 51 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 097294b7da3e..8085704863fb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1002,7 +1002,8 @@ static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
1002 | } | 1002 | } |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) | 1005 | static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, |
1006 | struct sk_buff *skb) | ||
1006 | { | 1007 | { |
1007 | tcp_verify_retransmit_hint(tp, skb); | 1008 | tcp_verify_retransmit_hint(tp, skb); |
1008 | 1009 | ||
@@ -2559,6 +2560,56 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
2559 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); | 2560 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
2560 | } | 2561 | } |
2561 | 2562 | ||
2563 | /* Do a simple retransmit without using the backoff mechanisms in | ||
2564 | * tcp_timer. This is used for path mtu discovery. | ||
2565 | * The socket is already locked here. | ||
2566 | */ | ||
2567 | void tcp_simple_retransmit(struct sock *sk) | ||
2568 | { | ||
2569 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
2570 | struct tcp_sock *tp = tcp_sk(sk); | ||
2571 | struct sk_buff *skb; | ||
2572 | unsigned int mss = tcp_current_mss(sk, 0); | ||
2573 | u32 prior_lost = tp->lost_out; | ||
2574 | |||
2575 | tcp_for_write_queue(skb, sk) { | ||
2576 | if (skb == tcp_send_head(sk)) | ||
2577 | break; | ||
2578 | if (skb->len > mss && | ||
2579 | !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { | ||
2580 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { | ||
2581 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | ||
2582 | tp->retrans_out -= tcp_skb_pcount(skb); | ||
2583 | } | ||
2584 | tcp_skb_mark_lost_uncond_verify(tp, skb); | ||
2585 | } | ||
2586 | } | ||
2587 | |||
2588 | tcp_clear_retrans_hints_partial(tp); | ||
2589 | |||
2590 | if (prior_lost == tp->lost_out) | ||
2591 | return; | ||
2592 | |||
2593 | if (tcp_is_reno(tp)) | ||
2594 | tcp_limit_reno_sacked(tp); | ||
2595 | |||
2596 | tcp_verify_left_out(tp); | ||
2597 | |||
2598 | /* Don't muck with the congestion window here. | ||
2599 | * Reason is that we do not increase amount of _data_ | ||
2600 | * in network, but units changed and effective | ||
2601 | * cwnd/ssthresh really reduced now. | ||
2602 | */ | ||
2603 | if (icsk->icsk_ca_state != TCP_CA_Loss) { | ||
2604 | tp->high_seq = tp->snd_nxt; | ||
2605 | tp->snd_ssthresh = tcp_current_ssthresh(sk); | ||
2606 | tp->prior_ssthresh = 0; | ||
2607 | tp->undo_marker = 0; | ||
2608 | tcp_set_ca_state(sk, TCP_CA_Loss); | ||
2609 | } | ||
2610 | tcp_xmit_retransmit_queue(sk); | ||
2611 | } | ||
2612 | |||
2562 | /* Process an event, which can update packets-in-flight not trivially. | 2613 | /* Process an event, which can update packets-in-flight not trivially. |
2563 | * Main goal of this function is to calculate new estimate for left_out, | 2614 | * Main goal of this function is to calculate new estimate for left_out, |
2564 | * taking into account both packets sitting in receiver's buffer and | 2615 | * taking into account both packets sitting in receiver's buffer and |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 86ef98975e94..c069ecb81ea5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1879,56 +1879,6 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
1879 | } | 1879 | } |
1880 | } | 1880 | } |
1881 | 1881 | ||
1882 | /* Do a simple retransmit without using the backoff mechanisms in | ||
1883 | * tcp_timer. This is used for path mtu discovery. | ||
1884 | * The socket is already locked here. | ||
1885 | */ | ||
1886 | void tcp_simple_retransmit(struct sock *sk) | ||
1887 | { | ||
1888 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
1889 | struct tcp_sock *tp = tcp_sk(sk); | ||
1890 | struct sk_buff *skb; | ||
1891 | unsigned int mss = tcp_current_mss(sk, 0); | ||
1892 | u32 prior_lost = tp->lost_out; | ||
1893 | |||
1894 | tcp_for_write_queue(skb, sk) { | ||
1895 | if (skb == tcp_send_head(sk)) | ||
1896 | break; | ||
1897 | if (skb->len > mss && | ||
1898 | !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { | ||
1899 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { | ||
1900 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | ||
1901 | tp->retrans_out -= tcp_skb_pcount(skb); | ||
1902 | } | ||
1903 | tcp_skb_mark_lost_uncond_verify(tp, skb); | ||
1904 | } | ||
1905 | } | ||
1906 | |||
1907 | tcp_clear_retrans_hints_partial(tp); | ||
1908 | |||
1909 | if (prior_lost == tp->lost_out) | ||
1910 | return; | ||
1911 | |||
1912 | if (tcp_is_reno(tp)) | ||
1913 | tcp_limit_reno_sacked(tp); | ||
1914 | |||
1915 | tcp_verify_left_out(tp); | ||
1916 | |||
1917 | /* Don't muck with the congestion window here. | ||
1918 | * Reason is that we do not increase amount of _data_ | ||
1919 | * in network, but units changed and effective | ||
1920 | * cwnd/ssthresh really reduced now. | ||
1921 | */ | ||
1922 | if (icsk->icsk_ca_state != TCP_CA_Loss) { | ||
1923 | tp->high_seq = tp->snd_nxt; | ||
1924 | tp->snd_ssthresh = tcp_current_ssthresh(sk); | ||
1925 | tp->prior_ssthresh = 0; | ||
1926 | tp->undo_marker = 0; | ||
1927 | tcp_set_ca_state(sk, TCP_CA_Loss); | ||
1928 | } | ||
1929 | tcp_xmit_retransmit_queue(sk); | ||
1930 | } | ||
1931 | |||
1932 | /* This retransmits one SKB. Policy decisions and retransmit queue | 1882 | /* This retransmits one SKB. Policy decisions and retransmit queue |
1933 | * state updates are done by the caller. Returns non-zero if an | 1883 | * state updates are done by the caller. Returns non-zero if an |
1934 | * error occurred which prevented the send. | 1884 | * error occurred which prevented the send. |