Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	23	+++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f7fa8d..257b61789eeb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
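
This halves the fraction of the receive buffer that is advertised as TCP window. For context, a sketch of how the sysctl feeds the window computation, modeled on tcp_win_from_space() from include/net/tcp.h of this era (helper shape from memory, not part of this patch):

	/* Sketch: split "space" bytes of receive buffer between the
	 * advertised window and per-skb overhead, per adv_win_scale.
	 */
	static int win_from_space(int space, int adv_win_scale)
	{
		return adv_win_scale <= 0 ?
		       space >> (-adv_win_scale) :
		       space - (space >> adv_win_scale);
	}

With the old default of 2, 3/4 of the buffer was advertised as window; with the new default of 1, only 1/2 is, leaving more headroom for growing per-skb truesize overhead.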
@@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 		incr = __tcp_grow_window(sk, skb);
 
 		if (incr) {
+			incr = max_t(int, incr, 2 * skb->len);
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
 					       tp->window_clamp);
 			inet_csk(sk)->icsk_ack.quick |= 1;
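
The added line puts a floor under the growth increment: each growth event now raises rcv_ssthresh by at least twice the payload of the skb that triggered it, so a small return from __tcp_grow_window() cannot stall receive-window growth. Illustrative numbers only, not from the patch:

	/* e.g. __tcp_grow_window() returns 128 for a 1460-byte segment;
	 * the clamp lifts the increment to 2 * 1460 = 2920 bytes.
	 */
	incr = max_t(int, incr, 2 * skb->len);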
@@ -474,8 +475,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 		if (!win_dep) {
 			m -= (new_sample >> 3);
 			new_sample += m;
-		} else if (m < new_sample)
-			new_sample = m << 3;
+		} else {
+			m <<= 3;
+			if (m < new_sample)
+				new_sample = m;
+		}
 	} else {
 		/* No previous measure. */
 		new_sample = m << 3;
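
new_sample holds the estimate in fixed point, scaled by 8 (hence the >> 3 and << 3). The old win_dep branch compared the unscaled sample m against the scaled estimate, so any sample below eight times the estimate replaced it, inflating the estimate by up to 8x. A stand-alone rendering of the fixed filter (hypothetical helper name, for illustration):

	/* Estimate kept as 8 * RTT; only genuinely smaller samples win. */
	static u32 rcv_rtt_win_dep(u32 new_sample, u32 m)
	{
		m <<= 3;		/* scale the sample to match */
		if (m < new_sample)
			new_sample = m;
		return new_sample;
	}

Worked case for the old code: with an estimate of 8 jiffies (new_sample = 64), a sample m = 40 passed "m < new_sample" and raised the estimate to 40 instead of leaving it at 8.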
@@ -491,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
 
 new_measure:
 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
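
rcv_rtt_est.time is recorded as tcp_time_stamp (in trees of this era, ((__u32)jiffies), and it is what the new_measure path stores), so subtracting it from raw jiffies mixed a possibly 64-bit counter with a 32-bit snapshot. Keeping both ends of the interval on the same 32-bit clock makes the subtraction wrap correctly; illustrative fragment:

	u32 start = tcp_time_stamp;		/* what new_measure records */
	u32 delta = tcp_time_stamp - start;	/* well-defined u32 delta */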
@@ -2864,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
 
 	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
 	if (tp->undo_marker) {
-		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
 			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-		else /* PRR */
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		} else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+			/* PRR algorithm. */
 			tp->snd_cwnd = tp->snd_ssthresh;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		}
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
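
Two fixes in one hunk: the CWR branch now refreshes snd_cwnd_stamp itself, and the PRR branch only copies ssthresh into cwnd when ssthresh was actually reduced. TCP_INFINITE_SSTHRESH (0x7fffffff in headers of this era) marks "not yet reduced"; copying it into snd_cwnd would blow the congestion window up rather than moderate it. The essence of the guard:

	/* Only a real, reduced ssthresh may replace cwnd. */
	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)
		tp->snd_cwnd = tp->snd_ssthresh;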
@@ -5225,7 +5232,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
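
net_dma_find_channel() is the networking wrapper around dma_find_channel(DMA_MEMCPY). As I recall the helper from drivers/dma/dmaengine.c of this era (a sketch, not part of this patch), it additionally rejects channels that cannot copy at byte alignment, which the raw dma_find_channel() call did not check:

	struct dma_chan *net_dma_find_channel(void)
	{
		struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

		/* Refuse channels that cannot copy at byte alignment. */
		if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
			return NULL;
		return chan;
	}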