author		Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-12-31 17:57:14 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:00:25 -0500
commit		056834d9f6f6eaf4cc7268569e53acab957aac27 (patch)
tree		86d8ae96566cc004e2737b8a9975fcd3e651e923 /net/ipv4
parent		058dc3342b71ffb3531c4f9df7c35f943f392b8d (diff)
[TCP]: cleanup tcp_{in,out}put.c style
These were manually selected from indent's results, which as-is
are too noisy to be of any use without human review. In addition,
some extra newlines between a function and its comment were
removed.
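For reference, the raw suggestions for a cleanup like this come from
GNU indent run in the usual kernel style; a sketch of such an
invocation (illustrative only, not necessarily the exact command used
for this patch) is:

	indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs net/ipv4/tcp_input.c

with the resulting hunks then reviewed and cherry-picked by hand.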
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_input.c	436
-rw-r--r--	net/ipv4/tcp_output.c	146
2 files changed, 300 insertions(+), 282 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1e7fd8113663..18e099c6fa62 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -121,8 +121,7 @@ int sysctl_tcp_abc __read_mostly;
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
-static void tcp_measure_rcv_mss(struct sock *sk,
-                                const struct sk_buff *skb)
+static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         const unsigned int lss = icsk->icsk_ack.last_seg_size;
@@ -133,7 +132,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
         /* skb->len may jitter because of SACKs, even if peer
          * sends good full-sized frames.
          */
-        len = skb_shinfo(skb)->gso_size ?: skb->len;
+        len = skb_shinfo(skb)->gso_size ? : skb->len;
         if (len >= icsk->icsk_ack.rcv_mss) {
                 icsk->icsk_ack.rcv_mss = len;
         } else {
@@ -173,8 +172,8 @@ static void tcp_incr_quickack(struct sock *sk)
         struct inet_connection_sock *icsk = inet_csk(sk);
         unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
-        if (quickacks==0)
-                quickacks=2;
+        if (quickacks == 0)
+                quickacks = 2;
         if (quickacks > icsk->icsk_ack.quick)
                 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
@@ -199,7 +198,7 @@ static inline int tcp_in_quickack_mode(const struct sock *sk)
 
 static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
 {
-        if (tp->ecn_flags&TCP_ECN_OK)
+        if (tp->ecn_flags & TCP_ECN_OK)
                 tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 }
 
@@ -216,7 +215,7 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
 
 static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 {
-        if (tp->ecn_flags&TCP_ECN_OK) {
+        if (tp->ecn_flags & TCP_ECN_OK) {
                 if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
                         tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                 /* Funny extension: if ECT is not set on a segment,
@@ -229,19 +228,19 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 
 static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
 {
-        if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr))
+        if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
                 tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
 static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
 {
-        if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr))
+        if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
                 tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
 static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
 {
-        if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK))
+        if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
                 return 1;
         return 0;
 }
@@ -303,8 +302,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
         return 0;
 }
 
-static void tcp_grow_window(struct sock *sk,
-                            struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
@@ -318,12 +316,13 @@ static void tcp_grow_window(struct sock *sk,
          * will fit to rcvbuf in future.
          */
         if (tcp_win_from_space(skb->truesize) <= skb->len)
-                incr = 2*tp->advmss;
+                incr = 2 * tp->advmss;
         else
                 incr = __tcp_grow_window(sk, skb);
 
         if (incr) {
-                tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
+                tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
+                                       tp->window_clamp);
                 inet_csk(sk)->icsk_ack.quick |= 1;
         }
 }
@@ -398,10 +397,9 @@ static void tcp_clamp_window(struct sock *sk)
                                     sysctl_tcp_rmem[2]);
         }
         if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
-                tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
+                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
 }
 
-
 /* Initialize RCV_MSS value.
  * RCV_MSS is an our guess about MSS used by the peer.
  * We haven't any direct information about the MSS.
@@ -414,7 +412,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
         struct tcp_sock *tp = tcp_sk(sk);
         unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
-        hint = min(hint, tp->rcv_wnd/2);
+        hint = min(hint, tp->rcv_wnd / 2);
         hint = min(hint, TCP_MIN_RCVMSS);
         hint = max(hint, TCP_MIN_MSS);
 
@@ -471,16 +469,15 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
                 goto new_measure;
         if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                 return;
-        tcp_rcv_rtt_update(tp,
-                           jiffies - tp->rcv_rtt_est.time,
-                           1);
+        tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
 
 new_measure:
         tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
         tp->rcv_rtt_est.time = tcp_time_stamp;
 }
 
-static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
+static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
+                                          const struct sk_buff *skb)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         if (tp->rx_opt.rcv_tsecr &&
@@ -503,8 +500,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                 goto new_measure;
 
         time = tcp_time_stamp - tp->rcvq_space.time;
-        if (time < (tp->rcv_rtt_est.rtt >> 3) ||
-            tp->rcv_rtt_est.rtt == 0)
+        if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
                 return;
 
         space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
@@ -580,7 +576,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
         } else {
                 int m = now - icsk->icsk_ack.lrcvtime;
 
-                if (m <= TCP_ATO_MIN/2) {
+                if (m <= TCP_ATO_MIN / 2) {
                         /* The fastest case is the first. */
                         icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
                 } else if (m < icsk->icsk_ack.ato) {
@@ -609,7 +605,7 @@ static u32 tcp_rto_min(struct sock *sk)
         u32 rto_min = TCP_RTO_MIN;
 
         if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-                rto_min = dst->metrics[RTAX_RTO_MIN-1];
+                rto_min = dst->metrics[RTAX_RTO_MIN - 1];
         return rto_min;
 }
 
@@ -672,14 +668,14 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                 }
                 if (after(tp->snd_una, tp->rtt_seq)) {
                         if (tp->mdev_max < tp->rttvar)
-                                tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
+                                tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
                         tp->rtt_seq = tp->snd_nxt;
                         tp->mdev_max = tcp_rto_min(sk);
                 }
         } else {
                 /* no previous measure. */
-                tp->srtt = m<<3;        /* take the measured time to be rtt */
-                tp->mdev = m<<1;        /* make sure rto = 3*rtt */
+                tp->srtt = m << 3;      /* take the measured time to be rtt */
+                tp->mdev = m << 1;      /* make sure rto = 3*rtt */
                 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
                 tp->rtt_seq = tp->snd_nxt;
         }
@@ -733,7 +729,7 @@ void tcp_update_metrics(struct sock *sk)
 
         dst_confirm(dst);
 
-        if (dst && (dst->flags&DST_HOST)) {
+        if (dst && (dst->flags & DST_HOST)) {
                 const struct inet_connection_sock *icsk = inet_csk(sk);
                 int m;
 
@@ -743,7 +739,7 @@ void tcp_update_metrics(struct sock *sk)
                          * Reset our results.
                          */
                         if (!(dst_metric_locked(dst, RTAX_RTT)))
-                                dst->metrics[RTAX_RTT-1] = 0;
+                                dst->metrics[RTAX_RTT - 1] = 0;
                         return;
                 }
 
@@ -755,9 +751,9 @@ void tcp_update_metrics(struct sock *sk)
                  */
                 if (!(dst_metric_locked(dst, RTAX_RTT))) {
                         if (m <= 0)
-                                dst->metrics[RTAX_RTT-1] = tp->srtt;
+                                dst->metrics[RTAX_RTT - 1] = tp->srtt;
                         else
-                                dst->metrics[RTAX_RTT-1] -= (m>>3);
+                                dst->metrics[RTAX_RTT - 1] -= (m >> 3);
                 }
 
                 if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
@@ -770,7 +766,7 @@ void tcp_update_metrics(struct sock *sk)
                                 m = tp->mdev;
 
                         if (m >= dst_metric(dst, RTAX_RTTVAR))
-                                dst->metrics[RTAX_RTTVAR-1] = m;
+                                dst->metrics[RTAX_RTTVAR - 1] = m;
                         else
                                 dst->metrics[RTAX_RTTVAR-1] -=
                                         (dst->metrics[RTAX_RTTVAR-1] - m)>>2;
@@ -784,7 +780,7 @@ void tcp_update_metrics(struct sock *sk)
                                 dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
                         if (!dst_metric_locked(dst, RTAX_CWND) &&
                             tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                                dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
+                                dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
                 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                            icsk->icsk_ca_state == TCP_CA_Open) {
                         /* Cong. avoidance phase, cwnd is reliable. */
@@ -1353,12 +1349,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                 }
 
                 if (in_sack <= 0)
-                        in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+                        in_sack = tcp_match_skb_to_sack(sk, skb, start_seq,
+                                                        end_seq);
                 if (unlikely(in_sack < 0))
                         break;
 
                 if (in_sack)
-                        *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, *fack_count);
+                        *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack,
+                                                 *fack_count);
 
                 *fack_count += tcp_skb_pcount(skb);
         }
@@ -1407,7 +1405,8 @@ static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
 }
 
 static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
+tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+                        u32 prior_snd_una)
 {
         const struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
@@ -1417,7 +1416,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
         struct tcp_sack_block sp[4];
         struct tcp_sack_block *cache;
         struct sk_buff *skb;
-        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
+        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3;
         int used_sacks;
         int reord = tp->packets_out;
         int flag = 0;
@@ -1484,17 +1483,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
         /* order SACK blocks to allow in order walk of the retrans queue */
         for (i = used_sacks - 1; i > 0; i--) {
-                for (j = 0; j < i; j++){
-                        if (after(sp[j].start_seq, sp[j+1].start_seq)) {
+                for (j = 0; j < i; j++) {
+                        if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
                                 struct tcp_sack_block tmp;
 
                                 tmp = sp[j];
-                                sp[j] = sp[j+1];
-                                sp[j+1] = tmp;
+                                sp[j] = sp[j + 1];
+                                sp[j + 1] = tmp;
 
                                 /* Track where the first SACK block goes to */
                                 if (j == first_sack_index)
-                                        first_sack_index = j+1;
+                                        first_sack_index = j + 1;
                         }
                 }
         }
@@ -1539,17 +1538,21 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                         /* Head todo? */
                         if (before(start_seq, cache->start_seq)) {
                                 skb = tcp_sacktag_skip(skb, sk, start_seq);
-                                skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq,
-                                                       cache->start_seq, dup_sack,
-                                                       &fack_count, &reord, &flag);
+                                skb = tcp_sacktag_walk(skb, sk, next_dup,
+                                                       start_seq,
+                                                       cache->start_seq,
+                                                       dup_sack, &fack_count,
+                                                       &reord, &flag);
                         }
 
                         /* Rest of the block already fully processed? */
                         if (!after(end_seq, cache->end_seq))
                                 goto advance_sp;
 
-                        skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
-                                                       &fack_count, &reord, &flag);
+                        skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
+                                                       cache->end_seq,
+                                                       &fack_count, &reord,
+                                                       &flag);
 
                         /* ...tail remains todo... */
                         if (tcp_highest_sack_seq(tp) == cache->end_seq) {
@@ -1654,10 +1657,10 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 
         if (acked > 0) {
                 /* One ACK acked hole. The rest eat duplicate ACKs. */
-                if (acked-1 >= tp->sacked_out)
+                if (acked - 1 >= tp->sacked_out)
                         tp->sacked_out = 0;
                 else
-                        tp->sacked_out -= acked-1;
+                        tp->sacked_out -= acked - 1;
         }
         tcp_check_reno_reordering(sk, acked);
         tcp_verify_left_out(tp);
@@ -1691,10 +1694,10 @@ int tcp_use_frto(struct sock *sk)
         tcp_for_write_queue_from(skb, sk) {
                 if (skb == tcp_send_head(sk))
                         break;
-                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+                if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
                         return 0;
                 /* Short-circuit when first non-SACKed skb has been checked */
-                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
+                if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
                         break;
         }
         return 1;
@@ -1804,7 +1807,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
                  * Count the retransmission made on RTO correctly (only when
                  * waiting for the first ACK and did not get it)...
                  */
-                if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
+                if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
                         /* For some reason this R-bit might get cleared? */
                         if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                                 tp->retrans_out += tcp_skb_pcount(skb);
@@ -1817,7 +1820,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
                 }
 
                 /* Don't lost mark skbs that were fwd transmitted after RTO */
-                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) &&
+                if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
                     !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
                         TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                         tp->lost_out += tcp_skb_pcount(skb);
@@ -1832,7 +1835,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
         tp->bytes_acked = 0;
 
         tp->reordering = min_t(unsigned int, tp->reordering,
-                                             sysctl_tcp_reordering);
+                               sysctl_tcp_reordering);
         tcp_set_ca_state(sk, TCP_CA_Loss);
         tp->high_seq = tp->frto_highmark;
         TCP_ECN_queue_cwr(tp);
@@ -1899,7 +1902,7 @@ void tcp_enter_loss(struct sock *sk, int how)
                 if (skb == tcp_send_head(sk))
                         break;
 
-                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+                if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
                         tp->undo_marker = 0;
                 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
                 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
@@ -1911,7 +1914,7 @@ void tcp_enter_loss(struct sock *sk, int how)
         tcp_verify_left_out(tp);
 
         tp->reordering = min_t(unsigned int, tp->reordering,
-                                             sysctl_tcp_reordering);
+                               sysctl_tcp_reordering);
         tcp_set_ca_state(sk, TCP_CA_Loss);
         tp->high_seq = tp->snd_nxt;
         TCP_ECN_queue_cwr(tp);
@@ -1943,7 +1946,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 
 static inline int tcp_fackets_out(struct tcp_sock *tp)
 {
-        return tcp_is_reno(tp) ? tp->sacked_out+1 : tp->fackets_out;
+        return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
 }
 
 /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs
@@ -2116,12 +2119,11 @@ static int tcp_time_to_recover(struct sock *sk)
  * retransmitted past LOST markings in the first place? I'm not fully sure
  * about undo and end of connection cases, which can cause R without L?
  */
-static void tcp_verify_retransmit_hint(struct tcp_sock *tp,
-                                       struct sk_buff *skb)
+static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
         if ((tp->retransmit_skb_hint != NULL) &&
             before(TCP_SKB_CB(skb)->seq,
                    TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                 tp->retransmit_skb_hint = NULL;
 }
 
@@ -2156,7 +2158,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
                 cnt += tcp_skb_pcount(skb);
 
                 if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) ||
-                     after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+                    after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
                         break;
                 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
                         TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -2223,7 +2225,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 {
         tp->snd_cwnd = min(tp->snd_cwnd,
-                           tcp_packets_in_flight(tp)+tcp_max_burst(tp));
+                           tcp_packets_in_flight(tp) + tcp_max_burst(tp));
         tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2243,15 +2245,15 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
         struct tcp_sock *tp = tcp_sk(sk);
         int decr = tp->snd_cwnd_cnt + 1;
 
-        if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
-            (tcp_is_reno(tp) && !(flag&FLAG_NOT_DUP))) {
-                tp->snd_cwnd_cnt = decr&1;
+        if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
+            (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
+                tp->snd_cwnd_cnt = decr & 1;
                 decr >>= 1;
 
                 if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
                         tp->snd_cwnd -= decr;
 
-                tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+                tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
                 tp->snd_cwnd_stamp = tcp_time_stamp;
         }
 }
@@ -2295,7 +2297,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
         if (icsk->icsk_ca_ops->undo_cwnd)
                 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
         else
-                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
+                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
         if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
                 tp->snd_ssthresh = tp->prior_ssthresh;
@@ -2314,8 +2316,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
 
 static inline int tcp_may_undo(struct tcp_sock *tp)
 {
-        return tp->undo_marker &&
-                (!tp->undo_retrans || tcp_packet_delayed(tp));
+        return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
 }
 
 /* People celebrate: "We love our President!" */
@@ -2434,7 +2435,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
         if (tp->retrans_out == 0)
                 tp->retrans_stamp = 0;
 
-        if (flag&FLAG_ECE)
+        if (flag & FLAG_ECE)
                 tcp_enter_cwr(sk, 1);
 
         if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
@@ -2480,7 +2481,6 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
         tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 }
 
-
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -2492,13 +2492,12 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void
-tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
-        int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
-        int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
-                                    (tcp_fackets_out(tp) > tp->reordering));
+        int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
+                                    (tcp_fackets_out(tp) > tp->reordering));
         int fast_rexmit = 0;
 
@@ -2509,7 +2508,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 
         /* Now state machine starts.
          * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-        if (flag&FLAG_ECE)
+        if (flag & FLAG_ECE)
                 tp->prior_ssthresh = 0;
 
         /* B. In all the states check for reneging SACKs. */
@@ -2521,7 +2520,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
             before(tp->snd_una, tp->high_seq) &&
             icsk->icsk_ca_state != TCP_CA_Open &&
             tp->fackets_out > tp->reordering) {
-                tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, 0);
+                tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
                 NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
         }
 
@@ -2581,7 +2580,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                         do_lost = tcp_try_undo_partial(sk, pkts_acked);
                 break;
         case TCP_CA_Loss:
-                if (flag&FLAG_DATA_ACKED)
+                if (flag & FLAG_DATA_ACKED)
                         icsk->icsk_retransmits = 0;
                 if (!tcp_try_undo_loss(sk)) {
                         tcp_moderate_cwnd(tp);
@@ -2631,7 +2630,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                 tp->undo_retrans = tp->retrans_out;
 
                 if (icsk->icsk_ca_state < TCP_CA_CWR) {
-                        if (!(flag&FLAG_ECE))
+                        if (!(flag & FLAG_ECE))
                                 tp->prior_ssthresh = tcp_current_ssthresh(sk);
                         tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                         TCP_ECN_queue_cwr(tp);
@@ -2725,7 +2724,8 @@ static void tcp_rearm_rto(struct sock *sk)
         if (!tp->packets_out) {
                 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
         } else {
-                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                          inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
         }
 }
 
@@ -2803,8 +2803,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
                                 flag |= FLAG_RETRANS_DATA_ACKED;
                                 ca_seq_rtt = -1;
                                 seq_rtt = -1;
-                                if ((flag & FLAG_DATA_ACKED) ||
-                                    (acked_pcount > 1))
+                                if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
                                         flag |= FLAG_NONHEAD_RETRANS_ACKED;
                         } else {
                                 ca_seq_rtt = now - scb->when;
@@ -2950,8 +2949,9 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
-static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
-                                        const u32 ack_seq, const u32 nwin)
+static inline int tcp_may_update_window(const struct tcp_sock *tp,
+                                        const u32 ack, const u32 ack_seq,
+                                        const u32 nwin)
 {
         return (after(ack, tp->snd_una) ||
                 after(ack_seq, tp->snd_wl1) ||
@@ -3020,7 +3020,7 @@ static void tcp_ratehalving_spur_to_response(struct sock *sk)
 
 static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 {
-        if (flag&FLAG_ECE)
+        if (flag & FLAG_ECE)
                 tcp_ratehalving_spur_to_response(sk);
         else
                 tcp_undo_cwr(sk, 1);
@@ -3063,7 +3063,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
         tcp_verify_left_out(tp);
 
         /* Duplicate the behavior from Loss state (fastretrans_alert) */
-        if (flag&FLAG_DATA_ACKED)
+        if (flag & FLAG_DATA_ACKED)
                 inet_csk(sk)->icsk_retransmits = 0;
 
         if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
@@ -3080,16 +3080,16 @@ static int tcp_process_frto(struct sock *sk, int flag)
                  * ACK isn't duplicate nor advances window, e.g., opposite dir
                  * data, winupdate
                  */
-                if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
+                if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
                         return 1;
 
-                if (!(flag&FLAG_DATA_ACKED)) {
+                if (!(flag & FLAG_DATA_ACKED)) {
                         tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
                                             flag);
                         return 1;
                 }
         } else {
-                if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
                         /* Prevent sending of new data. */
                         tp->snd_cwnd = min(tp->snd_cwnd,
                                            tcp_packets_in_flight(tp));
@@ -3097,10 +3097,12 @@ static int tcp_process_frto(struct sock *sk, int flag)
                 }
 
                 if ((tp->frto_counter >= 2) &&
-                    (!(flag&FLAG_FORWARD_PROGRESS) ||
-                     ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
+                    (!(flag & FLAG_FORWARD_PROGRESS) ||
+                     ((flag & FLAG_DATA_SACKED) &&
+                      !(flag & FLAG_ONLY_ORIG_SACKED)))) {
                         /* RFC4138 shortcoming (see comment above) */
-                        if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
+                        if (!(flag & FLAG_FORWARD_PROGRESS) &&
+                            (flag & FLAG_NOT_DUP))
                                 return 1;
 
                         tcp_enter_frto_loss(sk, 3, flag);
@@ -3166,13 +3168,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                         tp->bytes_acked += ack - prior_snd_una;
                 else if (icsk->icsk_ca_state == TCP_CA_Loss)
                         /* we assume just one segment left network */
-                        tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
+                        tp->bytes_acked += min(ack - prior_snd_una,
+                                               tp->mss_cache);
         }
 
         prior_fackets = tp->fackets_out;
         prior_in_flight = tcp_packets_in_flight(tp);
 
-        if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+        if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                 /* Window is constant, pure forward advance.
                  * No more checks are required.
                  * Note, we use the fact that SND.UNA>=SND.WL2.
@@ -3224,13 +3227,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
                     tcp_may_raise_cwnd(sk, flag))
                         tcp_cong_avoid(sk, ack, prior_in_flight);
-                tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
+                tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
+                                      flag);
         } else {
                 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                         tcp_cong_avoid(sk, ack, prior_in_flight);
         }
 
-        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
+        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                 dst_confirm(sk->sk_dst_cache);
 
         return 1;
@@ -3255,22 +3259,22 @@ uninteresting_ack:
         return 0;
 }
 
-
 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
  */
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
+void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
+                       int estab)
 {
         unsigned char *ptr;
         struct tcphdr *th = tcp_hdr(skb);
-        int length=(th->doff*4)-sizeof(struct tcphdr);
+        int length = (th->doff * 4) - sizeof(struct tcphdr);
 
         ptr = (unsigned char *)(th + 1);
         opt_rx->saw_tstamp = 0;
 
         while (length > 0) {
-                int opcode=*ptr++;
+                int opcode = *ptr++;
                 int opsize;
 
                 switch (opcode) {
@@ -3359,7 +3363,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
                                   struct tcp_sock *tp)
 {
-        if (th->doff == sizeof(struct tcphdr)>>2) {
+        if (th->doff == sizeof(struct tcphdr) >> 2) {
                 tp->rx_opt.saw_tstamp = 0;
                 return 0;
         } else if (tp->rx_opt.tstamp_ok &&
@@ -3444,7 +3448,8 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
                 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 }
 
-static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
+static inline int tcp_paws_discard(const struct sock *sk,
+                                   const struct sk_buff *skb)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
@@ -3476,16 +3481,16 @@ static void tcp_reset(struct sock *sk)
 {
         /* We want the right error as BSD sees it (and indeed as we do). */
         switch (sk->sk_state) {
-                case TCP_SYN_SENT:
-                        sk->sk_err = ECONNREFUSED;
-                        break;
-                case TCP_CLOSE_WAIT:
-                        sk->sk_err = EPIPE;
-                        break;
-                case TCP_CLOSE:
-                        return;
-                default:
-                        sk->sk_err = ECONNRESET;
+        case TCP_SYN_SENT:
+                sk->sk_err = ECONNREFUSED;
+                break;
+        case TCP_CLOSE_WAIT:
+                sk->sk_err = EPIPE;
+                break;
+        case TCP_CLOSE:
+                return;
+        default:
+                sk->sk_err = ECONNRESET;
         }
 
         if (!sock_flag(sk, SOCK_DEAD))
@@ -3518,43 +3523,43 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
         sock_set_flag(sk, SOCK_DONE);
 
         switch (sk->sk_state) {
-                case TCP_SYN_RECV:
-                case TCP_ESTABLISHED:
-                        /* Move to CLOSE_WAIT */
-                        tcp_set_state(sk, TCP_CLOSE_WAIT);
-                        inet_csk(sk)->icsk_ack.pingpong = 1;
-                        break;
+        case TCP_SYN_RECV:
+        case TCP_ESTABLISHED:
+                /* Move to CLOSE_WAIT */
+                tcp_set_state(sk, TCP_CLOSE_WAIT);
+                inet_csk(sk)->icsk_ack.pingpong = 1;
+                break;
 
-                case TCP_CLOSE_WAIT:
-                case TCP_CLOSING:
-                        /* Received a retransmission of the FIN, do
-                         * nothing.
-                         */
-                        break;
-                case TCP_LAST_ACK:
-                        /* RFC793: Remain in the LAST-ACK state. */
-                        break;
+        case TCP_CLOSE_WAIT:
+        case TCP_CLOSING:
+                /* Received a retransmission of the FIN, do
+                 * nothing.
+                 */
+                break;
+        case TCP_LAST_ACK:
+                /* RFC793: Remain in the LAST-ACK state. */
+                break;
 
-                case TCP_FIN_WAIT1:
-                        /* This case occurs when a simultaneous close
-                         * happens, we must ack the received FIN and
-                         * enter the CLOSING state.
-                         */
-                        tcp_send_ack(sk);
-                        tcp_set_state(sk, TCP_CLOSING);
-                        break;
-                case TCP_FIN_WAIT2:
-                        /* Received a FIN -- send ACK and enter TIME_WAIT. */
-                        tcp_send_ack(sk);
-                        tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-                        break;
-                default:
-                        /* Only TCP_LISTEN and TCP_CLOSE are left, in these
-                         * cases we should never reach this piece of code.
-                         */
-                        printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
-                               __FUNCTION__, sk->sk_state);
-                        break;
+        case TCP_FIN_WAIT1:
+                /* This case occurs when a simultaneous close
+                 * happens, we must ack the received FIN and
+                 * enter the CLOSING state.
+                 */
+                tcp_send_ack(sk);
+                tcp_set_state(sk, TCP_CLOSING);
+                break;
+        case TCP_FIN_WAIT2:
+                /* Received a FIN -- send ACK and enter TIME_WAIT. */
+                tcp_send_ack(sk);
+                tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+                break;
+        default:
+                /* Only TCP_LISTEN and TCP_CLOSE are left, in these
+                 * cases we should never reach this piece of code.
+                 */
+                printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
+                       __FUNCTION__, sk->sk_state);
+                break;
         }
 
         /* It _is_ possible, that we have something out-of-order _after_ FIN.
@@ -3577,7 +3582,8 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
         }
 }
 
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
+static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+                                  u32 end_seq)
 {
         if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
                 if (before(seq, sp->start_seq))
@@ -3600,7 +3606,8 @@ static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
                 tp->rx_opt.dsack = 1;
                 tp->duplicate_sack[0].start_seq = seq;
                 tp->duplicate_sack[0].end_seq = end_seq;
-                tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
+                tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
+                                           4 - tp->rx_opt.tstamp_ok);
         }
 }
 
@@ -3640,12 +3647,12 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 {
         int this_sack;
         struct tcp_sack_block *sp = &tp->selective_acks[0];
-        struct tcp_sack_block *swalk = sp+1;
+        struct tcp_sack_block *swalk = sp + 1;
 
         /* See if the recent change to the first SACK eats into
          * or hits the sequence space of other SACK blocks, if so coalesce.
          */
-        for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
+        for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
                 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
                         int i;
 
@@ -3653,16 +3660,19 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                          * Decrease num_sacks.
                          */
                         tp->rx_opt.num_sacks--;
-                        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-                        for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
-                                sp[i] = sp[i+1];
+                        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
+                                                   tp->rx_opt.dsack,
+                                                   4 - tp->rx_opt.tstamp_ok);
+                        for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
+                                sp[i] = sp[i + 1];
                         continue;
                 }
                 this_sack++, swalk++;
         }
 }
 
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
+static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
+                                 struct tcp_sack_block *sack2)
 {
         __u32 tmp;
 
@@ -3685,11 +3695,11 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
         if (!cur_sacks)
                 goto new_sack;
 
-        for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
+        for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
                 if (tcp_sack_extend(sp, seq, end_seq)) {
                         /* Rotate this_sack to the first one. */
-                        for (; this_sack>0; this_sack--, sp--)
-                                tcp_sack_swap(sp, sp-1);
+                        for (; this_sack > 0; this_sack--, sp--)
+                                tcp_sack_swap(sp, sp - 1);
                         if (cur_sacks > 1)
                                 tcp_sack_maybe_coalesce(tp);
                         return;
@@ -3708,14 +3718,15 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                 sp--;
         }
         for (; this_sack > 0; this_sack--, sp--)
-                *sp = *(sp-1);
+                *sp = *(sp - 1);
 
 new_sack:
         /* Build the new head SACK, and we're done. */
         sp->start_seq = seq;
         sp->end_seq = end_seq;
         tp->rx_opt.num_sacks++;
-        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
+        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
+                                   4 - tp->rx_opt.tstamp_ok);
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -3733,7 +3744,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                 return;
         }
 
-        for (this_sack = 0; this_sack < num_sacks; ) {
+        for (this_sack = 0; this_sack < num_sacks;) {
                 /* Check if the start of the sack is covered by RCV.NXT. */
                 if (!before(tp->rcv_nxt, sp->start_seq)) {
                         int i;
@@ -3752,7 +3763,9 @@ static void tcp_sack_remove(struct tcp_sock *tp)
         }
         if (num_sacks != tp->rx_opt.num_sacks) {
                 tp->rx_opt.num_sacks = num_sacks;
-                tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
+                tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
+                                           tp->rx_opt.dsack,
+                                           4 - tp->rx_opt.tstamp_ok);
         }
 }
 
@@ -3805,14 +3818,14 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
         if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
                 goto drop;
 
-        __skb_pull(skb, th->doff*4);
+        __skb_pull(skb, th->doff * 4);
 
         TCP_ECN_accept_cwr(tp, skb);
 
         if (tp->rx_opt.dsack) {
                 tp->rx_opt.dsack = 0;
                 tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
-                                               4 - tp->rx_opt.tstamp_ok);
+                                             4 - tp->rx_opt.tstamp_ok);
         }
 
         /* Queue data for delivery to the user.
@@ -3828,7 +3841,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
             tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
             sock_owned_by_user(sk) && !tp->urg_data) {
                 int chunk = min_t(unsigned int, skb->len,
-                                    tp->ucopy.len);
+                                  tp->ucopy.len);
 
                 __set_current_state(TASK_RUNNING);
 
@@ -3945,7 +3958,7 @@ drop:
                         tp->selective_acks[0].end_seq =
                                                 TCP_SKB_CB(skb)->end_seq;
                 }
-                __skb_queue_head(&tp->out_of_order_queue,skb);
+                __skb_queue_head(&tp->out_of_order_queue, skb);
         } else {
                 struct sk_buff *skb1 = tp->out_of_order_queue.prev;
                 u32 seq = TCP_SKB_CB(skb)->seq;
@@ -3968,10 +3981,10 @@ drop: | |||
3968 | if (!after(TCP_SKB_CB(skb1)->seq, seq)) | 3981 | if (!after(TCP_SKB_CB(skb1)->seq, seq)) |
3969 | break; | 3982 | break; |
3970 | } while ((skb1 = skb1->prev) != | 3983 | } while ((skb1 = skb1->prev) != |
3971 | (struct sk_buff*)&tp->out_of_order_queue); | 3984 | (struct sk_buff *)&tp->out_of_order_queue); |
3972 | 3985 | ||
3973 | /* Do skb overlap to previous one? */ | 3986 | /* Do skb overlap to previous one? */ |
3974 | if (skb1 != (struct sk_buff*)&tp->out_of_order_queue && | 3987 | if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && |
3975 | before(seq, TCP_SKB_CB(skb1)->end_seq)) { | 3988 | before(seq, TCP_SKB_CB(skb1)->end_seq)) { |
3976 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 3989 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
3977 | /* All the bits are present. Drop. */ | 3990 | /* All the bits are present. Drop. */ |
@@ -3981,7 +3994,8 @@ drop: | |||
3981 | } | 3994 | } |
3982 | if (after(seq, TCP_SKB_CB(skb1)->seq)) { | 3995 | if (after(seq, TCP_SKB_CB(skb1)->seq)) { |
3983 | /* Partial overlap. */ | 3996 | /* Partial overlap. */ |
3984 | tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq); | 3997 | tcp_dsack_set(tp, seq, |
3998 | TCP_SKB_CB(skb1)->end_seq); | ||
3985 | } else { | 3999 | } else { |
3986 | skb1 = skb1->prev; | 4000 | skb1 = skb1->prev; |
3987 | } | 4001 | } |
@@ -3990,15 +4004,17 @@ drop: | |||
3990 | 4004 | ||
3991 | /* And clean segments covered by new one as whole. */ | 4005 | /* And clean segments covered by new one as whole. */ |
3992 | while ((skb1 = skb->next) != | 4006 | while ((skb1 = skb->next) != |
3993 | (struct sk_buff*)&tp->out_of_order_queue && | 4007 | (struct sk_buff *)&tp->out_of_order_queue && |
3994 | after(end_seq, TCP_SKB_CB(skb1)->seq)) { | 4008 | after(end_seq, TCP_SKB_CB(skb1)->seq)) { |
3995 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4009 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
3996 | tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq); | 4010 | tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, |
3997 | break; | 4011 | end_seq); |
3998 | } | 4012 | break; |
3999 | __skb_unlink(skb1, &tp->out_of_order_queue); | 4013 | } |
4000 | tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); | 4014 | __skb_unlink(skb1, &tp->out_of_order_queue); |
4001 | __kfree_skb(skb1); | 4015 | tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, |
4016 | TCP_SKB_CB(skb1)->end_seq); | ||
4017 | __kfree_skb(skb1); | ||
4002 | } | 4018 | } |
4003 | 4019 | ||
4004 | add_sack: | 4020 | add_sack: |
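The out-of-order path above walks the queue backwards to find the insertion point, then distinguishes three overlap cases against a queued segment: the arrival is fully covered (drop it and DSACK its whole range), it overlaps only at its head (DSACK just the overlapped span), or it reaches back over the queued segment's start (keep stepping backwards; covered followers are later unlinked and DSACKed). A standalone sketch of that case split, assuming the wrap-safe compares shown earlier:

    #include <stdint.h>
    #include <stdio.h>

    static int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static int after(uint32_t a, uint32_t b)  { return before(b, a); }

    /* Arrival [seq, end_seq) against a queued segment [s1, e1). */
    static const char *classify(uint32_t seq, uint32_t end_seq,
                                uint32_t s1, uint32_t e1)
    {
        if (!before(seq, e1))
            return "no overlap: keep walking backwards";
        if (!after(end_seq, e1))
            return "fully covered: drop arrival, DSACK [seq, end_seq)";
        if (after(seq, s1))
            return "head overlap: DSACK [seq, e1), insert after";
        return "arrival covers segment start: step to the previous one";
    }

    int main(void)
    {
        /* Queued [100, 200), arriving [150, 300): head overlap. */
        printf("%s\n", classify(150, 300, 100, 200));
        return 0;
    }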
@@ -4021,7 +4037,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4021 | 4037 | ||
4022 | /* First, check that queue is collapsible and find | 4038 | /* First, check that queue is collapsible and find |
4023 | * the point where collapsing can be useful. */ | 4039 | * the point where collapsing can be useful. */ |
4024 | for (skb = head; skb != tail; ) { | 4040 | for (skb = head; skb != tail;) { |
4025 | /* No new bits? It is possible on ofo queue. */ | 4041 | /* No new bits? It is possible on ofo queue. */ |
4026 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4042 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4027 | struct sk_buff *next = skb->next; | 4043 | struct sk_buff *next = skb->next; |
@@ -4059,9 +4075,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4059 | /* Too big header? This can happen with IPv6. */ | 4075 | /* Too big header? This can happen with IPv6. */ |
4060 | if (copy < 0) | 4076 | if (copy < 0) |
4061 | return; | 4077 | return; |
4062 | if (end-start < copy) | 4078 | if (end - start < copy) |
4063 | copy = end-start; | 4079 | copy = end - start; |
4064 | nskb = alloc_skb(copy+header, GFP_ATOMIC); | 4080 | nskb = alloc_skb(copy + header, GFP_ATOMIC); |
4065 | if (!nskb) | 4081 | if (!nskb) |
4066 | return; | 4082 | return; |
4067 | 4083 | ||
@@ -4171,7 +4187,7 @@ static int tcp_prune_queue(struct sock *sk) | |||
4171 | tcp_collapse_ofo_queue(sk); | 4187 | tcp_collapse_ofo_queue(sk); |
4172 | tcp_collapse(sk, &sk->sk_receive_queue, | 4188 | tcp_collapse(sk, &sk->sk_receive_queue, |
4173 | sk->sk_receive_queue.next, | 4189 | sk->sk_receive_queue.next, |
4174 | (struct sk_buff*)&sk->sk_receive_queue, | 4190 | (struct sk_buff *)&sk->sk_receive_queue, |
4175 | tp->copied_seq, tp->rcv_nxt); | 4191 | tp->copied_seq, tp->rcv_nxt); |
4176 | sk_mem_reclaim(sk); | 4192 | sk_mem_reclaim(sk); |
4177 | 4193 | ||
@@ -4210,7 +4226,6 @@ static int tcp_prune_queue(struct sock *sk) | |||
4210 | return -1; | 4226 | return -1; |
4211 | } | 4227 | } |
4212 | 4228 | ||
4213 | |||
4214 | /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. | 4229 | /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. |
4215 | * As additional protections, we do not touch cwnd in retransmission phases, | 4230 | * As additional protections, we do not touch cwnd in retransmission phases, |
4216 | * and if application hit its sndbuf limit recently. | 4231 | * and if application hit its sndbuf limit recently. |
@@ -4272,8 +4287,8 @@ static void tcp_new_space(struct sock *sk) | |||
4272 | int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + | 4287 | int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + |
4273 | MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), | 4288 | MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), |
4274 | demanded = max_t(unsigned int, tp->snd_cwnd, | 4289 | demanded = max_t(unsigned int, tp->snd_cwnd, |
4275 | tp->reordering + 1); | 4290 | tp->reordering + 1); |
4276 | sndmem *= 2*demanded; | 4291 | sndmem *= 2 * demanded; |
4277 | if (sndmem > sk->sk_sndbuf) | 4292 | if (sndmem > sk->sk_sndbuf) |
4278 | sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); | 4293 | sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); |
4279 | tp->snd_cwnd_stamp = tcp_time_stamp; | 4294 | tp->snd_cwnd_stamp = tcp_time_stamp; |
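tcp_new_space() sizes the send buffer as a per-segment cost (worst-case frame plus sk_buff overhead) times twice the demanded window, where demanded is at least reordering + 1 segments. A worked sketch with illustrative numbers (the per-segment constants below are assumptions for the example, not the kernel's exact values):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed per-segment cost: payload + headers + sk_buff bookkeeping. */
        unsigned per_skb = 1460 + 128 + 16 + 256;
        unsigned cwnd = 10, reordering = 3;
        unsigned demanded = cwnd > reordering + 1 ? cwnd : reordering + 1;
        unsigned sndmem = per_skb * 2 * demanded;   /* sndmem *= 2 * demanded */
        printf("demanded=%u segs, sndbuf target=%u bytes\n", demanded, sndmem);
        return 0;
    }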
@@ -4314,8 +4329,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) | |||
4314 | /* We ACK each frame or... */ | 4329 | /* We ACK each frame or... */ |
4315 | tcp_in_quickack_mode(sk) || | 4330 | tcp_in_quickack_mode(sk) || |
4316 | /* We have out of order data. */ | 4331 | /* We have out of order data. */ |
4317 | (ofo_possible && | 4332 | (ofo_possible && skb_peek(&tp->out_of_order_queue))) { |
4318 | skb_peek(&tp->out_of_order_queue))) { | ||
4319 | /* Then ack it now */ | 4333 | /* Then ack it now */ |
4320 | tcp_send_ack(sk); | 4334 | tcp_send_ack(sk); |
4321 | } else { | 4335 | } else { |
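__tcp_ack_snd_check() ACKs immediately when enough new data is pending, when quickack mode is on, or when out-of-order data has arrived; otherwise it falls into the else branch above and arms the delayed-ACK timer. The decision reduces to a predicate like this sketch (the names are placeholders, not kernel API):

    #include <stdio.h>

    static int should_ack_now(int frames_pending, int quickack,
                              int ofo_possible, int ofo_nonempty)
    {
        return frames_pending                       /* enough unacked data  */
            || quickack                             /* we ACK each frame    */
            || (ofo_possible && ofo_nonempty);      /* out-of-order data    */
    }

    int main(void)
    {
        printf("%d\n", should_ack_now(0, 0, 1, 1));  /* 1: ACK immediately */
        return 0;
    }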
@@ -4343,7 +4357,7 @@ static inline void tcp_ack_snd_check(struct sock *sk) | |||
4343 | * either form (or just set the sysctl tcp_stdurg). | 4357 | * either form (or just set the sysctl tcp_stdurg). |
4344 | */ | 4358 | */ |
4345 | 4359 | ||
4346 | static void tcp_check_urg(struct sock * sk, struct tcphdr * th) | 4360 | static void tcp_check_urg(struct sock *sk, struct tcphdr *th) |
4347 | { | 4361 | { |
4348 | struct tcp_sock *tp = tcp_sk(sk); | 4362 | struct tcp_sock *tp = tcp_sk(sk); |
4349 | u32 ptr = ntohs(th->urg_ptr); | 4363 | u32 ptr = ntohs(th->urg_ptr); |
@@ -4392,8 +4406,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th) | |||
4392 | * buggy users. | 4406 | * buggy users. |
4393 | */ | 4407 | */ |
4394 | if (tp->urg_seq == tp->copied_seq && tp->urg_data && | 4408 | if (tp->urg_seq == tp->copied_seq && tp->urg_data && |
4395 | !sock_flag(sk, SOCK_URGINLINE) && | 4409 | !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { |
4396 | tp->copied_seq != tp->rcv_nxt) { | ||
4397 | struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); | 4410 | struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); |
4398 | tp->copied_seq++; | 4411 | tp->copied_seq++; |
4399 | if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { | 4412 | if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { |
@@ -4402,8 +4415,8 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th) | |||
4402 | } | 4415 | } |
4403 | } | 4416 | } |
4404 | 4417 | ||
4405 | tp->urg_data = TCP_URG_NOTYET; | 4418 | tp->urg_data = TCP_URG_NOTYET; |
4406 | tp->urg_seq = ptr; | 4419 | tp->urg_seq = ptr; |
4407 | 4420 | ||
4408 | /* Disable header prediction. */ | 4421 | /* Disable header prediction. */ |
4409 | tp->pred_flags = 0; | 4422 | tp->pred_flags = 0; |
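Only the first and last steps of the urgent-pointer derivation appear in this hunk: ptr starts as ntohs(th->urg_ptr) and ends up in tp->urg_seq. In between, per the surrounding kernel code (elided here, so treat this as an assumption), the pointer is decremented for BSD-compatible peers unless tcp_stdurg is set, then rebased onto the segment sequence number. A sketch under that assumption:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shape of the elided middle of tcp_check_urg(). */
    static uint32_t urg_seq_of(uint16_t urg_ptr_net, uint32_t seq_net, int stdurg)
    {
        uint32_t ptr = ntohs(urg_ptr_net);
        if (ptr && !stdurg)
            ptr--;                  /* BSD-style pointer is off by one */
        return ntohl(seq_net) + ptr;
    }

    int main(void)
    {
        printf("%u\n", urg_seq_of(htons(5), htonl(1000), 0));   /* 1004 */
        return 0;
    }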
@@ -4416,7 +4429,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) | |||
4416 | 4429 | ||
4417 | /* Check if we get a new urgent pointer - normally not. */ | 4430 | /* Check if we get a new urgent pointer - normally not. */ |
4418 | if (th->urg) | 4431 | if (th->urg) |
4419 | tcp_check_urg(sk,th); | 4432 | tcp_check_urg(sk, th); |
4420 | 4433 | ||
4421 | /* Do we wait for any urgent data? - normally not... */ | 4434 | /* Do we wait for any urgent data? - normally not... */ |
4422 | if (tp->urg_data == TCP_URG_NOTYET) { | 4435 | if (tp->urg_data == TCP_URG_NOTYET) { |
@@ -4458,7 +4471,8 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) | |||
4458 | return err; | 4471 | return err; |
4459 | } | 4472 | } |
4460 | 4473 | ||
4461 | static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) | 4474 | static __sum16 __tcp_checksum_complete_user(struct sock *sk, |
4475 | struct sk_buff *skb) | ||
4462 | { | 4476 | { |
4463 | __sum16 result; | 4477 | __sum16 result; |
4464 | 4478 | ||
@@ -4472,14 +4486,16 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb | |||
4472 | return result; | 4486 | return result; |
4473 | } | 4487 | } |
4474 | 4488 | ||
4475 | static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) | 4489 | static inline int tcp_checksum_complete_user(struct sock *sk, |
4490 | struct sk_buff *skb) | ||
4476 | { | 4491 | { |
4477 | return !skb_csum_unnecessary(skb) && | 4492 | return !skb_csum_unnecessary(skb) && |
4478 | __tcp_checksum_complete_user(sk, skb); | 4493 | __tcp_checksum_complete_user(sk, skb); |
4479 | } | 4494 | } |
4480 | 4495 | ||
4481 | #ifdef CONFIG_NET_DMA | 4496 | #ifdef CONFIG_NET_DMA |
4482 | static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) | 4497 | static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, |
4498 | int hlen) | ||
4483 | { | 4499 | { |
4484 | struct tcp_sock *tp = tcp_sk(sk); | 4500 | struct tcp_sock *tp = tcp_sk(sk); |
4485 | int chunk = skb->len - hlen; | 4501 | int chunk = skb->len - hlen; |
@@ -4495,7 +4511,9 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen | |||
4495 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { | 4511 | if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { |
4496 | 4512 | ||
4497 | dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, | 4513 | dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, |
4498 | skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list); | 4514 | skb, hlen, |
4515 | tp->ucopy.iov, chunk, | ||
4516 | tp->ucopy.pinned_list); | ||
4499 | 4517 | ||
4500 | if (dma_cookie < 0) | 4518 | if (dma_cookie < 0) |
4501 | goto out; | 4519 | goto out; |
@@ -4577,7 +4595,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4577 | */ | 4595 | */ |
4578 | 4596 | ||
4579 | if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && | 4597 | if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && |
4580 | TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { | 4598 | TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { |
4581 | int tcp_header_len = tp->tcp_header_len; | 4599 | int tcp_header_len = tp->tcp_header_len; |
4582 | 4600 | ||
4583 | /* Timestamp header prediction: tcp_header_len | 4601 | /* Timestamp header prediction: tcp_header_len |
@@ -4646,7 +4664,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4646 | eaten = 1; | 4664 | eaten = 1; |
4647 | } | 4665 | } |
4648 | #endif | 4666 | #endif |
4649 | if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) { | 4667 | if (tp->ucopy.task == current && |
4668 | sock_owned_by_user(sk) && !copied_early) { | ||
4650 | __set_current_state(TASK_RUNNING); | 4669 | __set_current_state(TASK_RUNNING); |
4651 | 4670 | ||
4652 | if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) | 4671 | if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) |
@@ -4693,7 +4712,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4693 | NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS); | 4712 | NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS); |
4694 | 4713 | ||
4695 | /* Bulk data transfer: receiver */ | 4714 | /* Bulk data transfer: receiver */ |
4696 | __skb_pull(skb,tcp_header_len); | 4715 | __skb_pull(skb, tcp_header_len); |
4697 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 4716 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
4698 | skb_set_owner_r(skb, sk); | 4717 | skb_set_owner_r(skb, sk); |
4699 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 4718 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
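The fast-path test at the top of this function compares the header's third 32-bit word (data offset, flags, window) against tp->pred_flags, which is precomputed with only ACK set and the expected window (PSH is masked out by TCP_HP_BITS); equality plus seq == rcv_nxt lets the segment skip the slow path. A sketch of that word's layout, consistent with the standard TCP header (the actual construction lives in tcp.h, not in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    /* Word 3 of a TCP header, host order:
     * doff(4 bits) | reserved(4) | flags(8) | window(16). */
    static uint32_t pred_word(unsigned hdr_len_bytes, uint32_t snd_wnd)
    {
        unsigned doff = hdr_len_bytes / 4;
        return (doff << 28) | (0x10 << 16) | (snd_wnd & 0xffff); /* 0x10 = ACK */
    }

    int main(void)
    {
        /* 32-byte header (timestamps), window 65535 -> 0x8010ffff */
        printf("0x%08x\n", pred_word(32, 65535));
        return 0;
    }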
@@ -4725,7 +4744,7 @@ no_ack: | |||
4725 | } | 4744 | } |
4726 | 4745 | ||
4727 | slow_path: | 4746 | slow_path: |
4728 | if (len < (th->doff<<2) || tcp_checksum_complete_user(sk, skb)) | 4747 | if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) |
4729 | goto csum_error; | 4748 | goto csum_error; |
4730 | 4749 | ||
4731 | /* | 4750 | /* |
@@ -4975,7 +4994,8 @@ discard: | |||
4975 | } | 4994 | } |
4976 | 4995 | ||
4977 | /* PAWS check. */ | 4996 | /* PAWS check. */ |
4978 | if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0)) | 4997 | if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && |
4998 | tcp_paws_check(&tp->rx_opt, 0)) | ||
4979 | goto discard_and_undo; | 4999 | goto discard_and_undo; |
4980 | 5000 | ||
4981 | if (th->syn) { | 5001 | if (th->syn) { |
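The PAWS check rewrapped above rejects a segment whose echoed timestamp is older than ts_recent, using the same wrap-safe signed comparison as sequence numbers. A simplified sketch of the core decision (the real tcp_paws_check also lets a sufficiently stale ts_recent expire; that part is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    static int paws_reject(uint32_t rcv_tsval, uint32_t ts_recent)
    {
        /* Reject if the peer's timestamp clock appears to run backwards. */
        return (int32_t)(rcv_tsval - ts_recent) < 0;
    }

    int main(void)
    {
        printf("%d %d\n", paws_reject(100, 200), paws_reject(200, 100)); /* 1 0 */
        return 0;
    }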
@@ -5010,7 +5030,6 @@ discard: | |||
5010 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); | 5030 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
5011 | tcp_initialize_rcv_mss(sk); | 5031 | tcp_initialize_rcv_mss(sk); |
5012 | 5032 | ||
5013 | |||
5014 | tcp_send_synack(sk); | 5033 | tcp_send_synack(sk); |
5015 | #if 0 | 5034 | #if 0 |
5016 | /* Note, we could accept data and URG from this segment. | 5035 | /* Note, we could accept data and URG from this segment. |
@@ -5042,7 +5061,6 @@ reset_and_undo: | |||
5042 | return 1; | 5061 | return 1; |
5043 | } | 5062 | } |
5044 | 5063 | ||
5045 | |||
5046 | /* | 5064 | /* |
5047 | * This function implements the receiving procedure of RFC 793 for | 5065 | * This function implements the receiving procedure of RFC 793 for |
5048 | * all states except ESTABLISHED and TIME_WAIT. | 5066 | * all states except ESTABLISHED and TIME_WAIT. |
@@ -5164,7 +5182,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5164 | */ | 5182 | */ |
5165 | if (sk->sk_socket) | 5183 | if (sk->sk_socket) |
5166 | sk_wake_async(sk, | 5184 | sk_wake_async(sk, |
5167 | SOCK_WAKE_IO, POLL_OUT); | 5185 | SOCK_WAKE_IO, POLL_OUT); |
5168 | 5186 | ||
5169 | tp->snd_una = TCP_SKB_CB(skb)->ack_seq; | 5187 | tp->snd_una = TCP_SKB_CB(skb)->ack_seq; |
5170 | tp->snd_wnd = ntohs(th->window) << | 5188 | tp->snd_wnd = ntohs(th->window) << |
@@ -5176,8 +5194,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5176 | * and does not calculate rtt. | 5194 | * and does not calculate rtt. |
5177 | * Fix it at least with timestamps. | 5195 | * Fix it at least with timestamps. |
5178 | */ | 5196 | */ |
5179 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && | 5197 | if (tp->rx_opt.saw_tstamp && |
5180 | !tp->srtt) | 5198 | tp->rx_opt.rcv_tsecr && !tp->srtt) |
5181 | tcp_ack_saw_tstamp(sk, 0); | 5199 | tcp_ack_saw_tstamp(sk, 0); |
5182 | 5200 | ||
5183 | if (tp->rx_opt.tstamp_ok) | 5201 | if (tp->rx_opt.tstamp_ok) |
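The snd_wnd assignment at the end of this hunk shifts the 16-bit window field by the scale negotiated in the SYN exchange, so a raw window of 65535 with wscale 7 advertises about 8 MB:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t raw = 65535;
        unsigned wscale = 7;                 /* negotiated during the handshake */
        printf("%u bytes\n", (uint32_t)raw << wscale);   /* 8388480 */
        return 0;
    }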
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 454cf84b6154..bb7e80a284e7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -221,14 +221,14 @@ void tcp_select_initial_window(int __space, __u32 mss, | |||
221 | * following RFC2414. Senders, not following this RFC, | 221 | * following RFC2414. Senders, not following this RFC, |
222 | * will be satisfied with 2. | 222 | * will be satisfied with 2. |
223 | */ | 223 | */ |
224 | if (mss > (1<<*rcv_wscale)) { | 224 | if (mss > (1 << *rcv_wscale)) { |
225 | int init_cwnd = 4; | 225 | int init_cwnd = 4; |
226 | if (mss > 1460*3) | 226 | if (mss > 1460 * 3) |
227 | init_cwnd = 2; | 227 | init_cwnd = 2; |
228 | else if (mss > 1460) | 228 | else if (mss > 1460) |
229 | init_cwnd = 3; | 229 | init_cwnd = 3; |
230 | if (*rcv_wnd > init_cwnd*mss) | 230 | if (*rcv_wnd > init_cwnd * mss) |
231 | *rcv_wnd = init_cwnd*mss; | 231 | *rcv_wnd = init_cwnd * mss; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* Set the clamp no higher than max representable value */ | 234 | /* Set the clamp no higher than max representable value */ |
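The init_cwnd ladder reindented above follows RFC 2414's bound of min(4*MSS, max(2*MSS, 4380 bytes)): 2 segments for an MSS above 4380 (1460 * 3), 3 above 1460, otherwise 4, with rcv_wnd then clamped to init_cwnd * mss. A worked sketch:

    #include <stdio.h>

    /* RFC 2414-style initial congestion window, as in the ladder above. */
    static int init_cwnd_for(int mss)
    {
        if (mss > 1460 * 3)
            return 2;
        if (mss > 1460)
            return 3;
        return 4;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               init_cwnd_for(536), init_cwnd_for(2920), init_cwnd_for(8960));
        /* 4 3 2 */
        return 0;
    }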
@@ -278,11 +278,10 @@ static u16 tcp_select_window(struct sock *sk) | |||
278 | return new_win; | 278 | return new_win; |
279 | } | 279 | } |
280 | 280 | ||
281 | static inline void TCP_ECN_send_synack(struct tcp_sock *tp, | 281 | static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) |
282 | struct sk_buff *skb) | ||
283 | { | 282 | { |
284 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; | 283 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; |
285 | if (!(tp->ecn_flags&TCP_ECN_OK)) | 284 | if (!(tp->ecn_flags & TCP_ECN_OK)) |
286 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; | 285 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; |
287 | } | 286 | } |
288 | 287 | ||
@@ -292,7 +291,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) | |||
292 | 291 | ||
293 | tp->ecn_flags = 0; | 292 | tp->ecn_flags = 0; |
294 | if (sysctl_tcp_ecn) { | 293 | if (sysctl_tcp_ecn) { |
295 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; | 294 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; |
296 | tp->ecn_flags = TCP_ECN_OK; | 295 | tp->ecn_flags = TCP_ECN_OK; |
297 | } | 296 | } |
298 | } | 297 | } |
@@ -314,7 +313,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, | |||
314 | if (skb->len != tcp_header_len && | 313 | if (skb->len != tcp_header_len && |
315 | !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { | 314 | !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { |
316 | INET_ECN_xmit(sk); | 315 | INET_ECN_xmit(sk); |
317 | if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { | 316 | if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { |
318 | tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; | 317 | tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; |
319 | tcp_hdr(skb)->cwr = 1; | 318 | tcp_hdr(skb)->cwr = 1; |
320 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; | 319 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
@@ -431,7 +430,7 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | |||
431 | (TCPOPT_NOP << 16) | | 430 | (TCPOPT_NOP << 16) | |
432 | (TCPOPT_MD5SIG << 8) | | 431 | (TCPOPT_MD5SIG << 8) | |
433 | TCPOLEN_MD5SIG); | 432 | TCPOLEN_MD5SIG); |
434 | *md5_hash = (__u8 *) ptr; | 433 | *md5_hash = (__u8 *)ptr; |
435 | } | 434 | } |
436 | #endif | 435 | #endif |
437 | } | 436 | } |
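TCP options are emitted as 32-bit words; the MD5 branch above packs NOP padding, the MD5SIG kind, and its length into one word (the top-byte NOP sits on a line just above this hunk), leaving md5_hash pointing at the digest that follows. A sketch with the standard RFC 2385 values, redefined here only for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_NOP      1
    #define TCPOPT_MD5SIG   19
    #define TCPOLEN_MD5SIG  18

    int main(void)
    {
        uint32_t word = (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                        (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG;
        printf("0x%08x\n", word);   /* 0x01011312 */
        return 0;
    }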
@@ -447,7 +446,8 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | |||
447 | * We are working here with either a clone of the original | 446 | * We are working here with either a clone of the original |
448 | * SKB, or a fresh unique copy made by the retransmit engine. | 447 | * SKB, or a fresh unique copy made by the retransmit engine. |
449 | */ | 448 | */ |
450 | static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask) | 449 | static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
450 | gfp_t gfp_mask) | ||
451 | { | 451 | { |
452 | const struct inet_connection_sock *icsk = inet_csk(sk); | 452 | const struct inet_connection_sock *icsk = inet_csk(sk); |
453 | struct inet_sock *inet; | 453 | struct inet_sock *inet; |
@@ -551,8 +551,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
551 | th->urg_ptr = 0; | 551 | th->urg_ptr = 0; |
552 | 552 | ||
553 | if (unlikely(tp->urg_mode && | 553 | if (unlikely(tp->urg_mode && |
554 | between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) { | 554 | between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) { |
555 | th->urg_ptr = htons(tp->snd_up-tcb->seq); | 555 | th->urg_ptr = htons(tp->snd_up - tcb->seq); |
556 | th->urg = 1; | 556 | th->urg = 1; |
557 | } | 557 | } |
558 | 558 | ||
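The rewrapped condition asks whether snd_up lies within the 16-bit reach of this segment's urgent pointer, since th->urg_ptr = htons(tp->snd_up - tcb->seq) can only represent offsets up to 0xFFFF. between() is the wrap-safe range test; a sketch matching its usual definition in tcp.h:

    #include <stdint.h>
    #include <stdio.h>

    /* True if seq1 lies in [seq2, seq3] under wrap-safe sequence arithmetic. */
    static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
    {
        return seq3 - seq2 >= seq1 - seq2;
    }

    int main(void)
    {
        uint32_t seq = 4294967000u;                         /* near the wrap */
        printf("%d\n", between(seq + 50, seq + 1, seq + 0xFFFF));   /* 1 */
        return 0;
    }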
@@ -616,7 +616,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
616 | #undef SYSCTL_FLAG_SACK | 616 | #undef SYSCTL_FLAG_SACK |
617 | } | 617 | } |
618 | 618 | ||
619 | |||
620 | /* This routine just queue's the buffer | 619 | /* This routine just queue's the buffer |
621 | * | 620 | * |
622 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, | 621 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, |
@@ -634,7 +633,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) | |||
634 | sk_mem_charge(sk, skb->truesize); | 633 | sk_mem_charge(sk, skb->truesize); |
635 | } | 634 | } |
636 | 635 | ||
637 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now) | 636 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, |
637 | unsigned int mss_now) | ||
638 | { | 638 | { |
639 | if (skb->len <= mss_now || !sk_can_gso(sk)) { | 639 | if (skb->len <= mss_now || !sk_can_gso(sk)) { |
640 | /* Avoid the costly divide in the normal | 640 | /* Avoid the costly divide in the normal |
@@ -670,7 +670,8 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb, | |||
670 | * packet to the list. This won't be called frequently, I hope. | 670 | * packet to the list. This won't be called frequently, I hope. |
671 | * Remember, these are still headerless SKBs at this point. | 671 | * Remember, these are still headerless SKBs at this point. |
672 | */ | 672 | */ |
673 | int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) | 673 | int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, |
674 | unsigned int mss_now) | ||
674 | { | 675 | { |
675 | struct tcp_sock *tp = tcp_sk(sk); | 676 | struct tcp_sock *tp = tcp_sk(sk); |
676 | struct sk_buff *buff; | 677 | struct sk_buff *buff; |
@@ -708,13 +709,14 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss | |||
708 | 709 | ||
709 | /* PSH and FIN should only be set in the second packet. */ | 710 | /* PSH and FIN should only be set in the second packet. */ |
710 | flags = TCP_SKB_CB(skb)->flags; | 711 | flags = TCP_SKB_CB(skb)->flags; |
711 | TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); | 712 | TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); |
712 | TCP_SKB_CB(buff)->flags = flags; | 713 | TCP_SKB_CB(buff)->flags = flags; |
713 | TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; | 714 | TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; |
714 | 715 | ||
715 | if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { | 716 | if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { |
716 | /* Copy and checksum data tail into the new buffer. */ | 717 | /* Copy and checksum data tail into the new buffer. */ |
717 | buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize), | 718 | buff->csum = csum_partial_copy_nocheck(skb->data + len, |
719 | skb_put(buff, nsize), | ||
718 | nsize, 0); | 720 | nsize, 0); |
719 | 721 | ||
720 | skb_trim(skb, len); | 722 | skb_trim(skb, len); |
@@ -781,7 +783,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) | |||
781 | 783 | ||
782 | eat = len; | 784 | eat = len; |
783 | k = 0; | 785 | k = 0; |
784 | for (i=0; i<skb_shinfo(skb)->nr_frags; i++) { | 786 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
785 | if (skb_shinfo(skb)->frags[i].size <= eat) { | 787 | if (skb_shinfo(skb)->frags[i].size <= eat) { |
786 | put_page(skb_shinfo(skb)->frags[i].page); | 788 | put_page(skb_shinfo(skb)->frags[i].page); |
787 | eat -= skb_shinfo(skb)->frags[i].size; | 789 | eat -= skb_shinfo(skb)->frags[i].size; |
@@ -804,8 +806,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) | |||
804 | 806 | ||
805 | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) | 807 | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) |
806 | { | 808 | { |
807 | if (skb_cloned(skb) && | 809 | if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
808 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | ||
809 | return -ENOMEM; | 810 | return -ENOMEM; |
810 | 811 | ||
811 | /* If len == headlen, we avoid __skb_pull to preserve alignment. */ | 812 | /* If len == headlen, we avoid __skb_pull to preserve alignment. */ |
@@ -909,7 +910,6 @@ void tcp_mtup_init(struct sock *sk) | |||
909 | NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache | 910 | NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache |
910 | are READ ONLY outside this function. --ANK (980731) | 911 | are READ ONLY outside this function. --ANK (980731) |
911 | */ | 912 | */ |
912 | |||
913 | unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) | 913 | unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) |
914 | { | 914 | { |
915 | struct tcp_sock *tp = tcp_sk(sk); | 915 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -922,8 +922,8 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) | |||
922 | mss_now = tcp_mtu_to_mss(sk, pmtu); | 922 | mss_now = tcp_mtu_to_mss(sk, pmtu); |
923 | 923 | ||
924 | /* Bound mss with half of window */ | 924 | /* Bound mss with half of window */ |
925 | if (tp->max_window && mss_now > (tp->max_window>>1)) | 925 | if (tp->max_window && mss_now > (tp->max_window >> 1)) |
926 | mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len); | 926 | mss_now = max((tp->max_window >> 1), 68U - tp->tcp_header_len); |
927 | 927 | ||
928 | /* And store cached results */ | 928 | /* And store cached results */ |
929 | icsk->icsk_pmtu_cookie = pmtu; | 929 | icsk->icsk_pmtu_cookie = pmtu; |
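Bounding the MSS to half of the largest window the peer has ever advertised keeps at least two segments in flight for ACK clocking, with a floor derived from the 68-byte minimum IP MTU. A standalone sketch of the clamp:

    #include <stdio.h>

    /* Bound mss by half the peer's max window, with a 68-byte-MTU floor. */
    static unsigned bound_mss(unsigned mss_now, unsigned max_window,
                              unsigned tcp_header_len)
    {
        if (max_window && mss_now > (max_window >> 1)) {
            unsigned half  = max_window >> 1;
            unsigned floor = 68u - tcp_header_len;
            mss_now = half > floor ? half : floor;
        }
        return mss_now;
    }

    int main(void)
    {
        printf("%u\n", bound_mss(1460, 1024, 20));  /* 512 */
        return 0;
    }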
@@ -977,8 +977,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
977 | inet_csk(sk)->icsk_ext_hdr_len - | 977 | inet_csk(sk)->icsk_ext_hdr_len - |
978 | tp->tcp_header_len); | 978 | tp->tcp_header_len); |
979 | 979 | ||
980 | if (tp->max_window && | 980 | if (tp->max_window && (xmit_size_goal > (tp->max_window >> 1))) |
981 | (xmit_size_goal > (tp->max_window >> 1))) | ||
982 | xmit_size_goal = max((tp->max_window >> 1), | 981 | xmit_size_goal = max((tp->max_window >> 1), |
983 | 68U - tp->tcp_header_len); | 982 | 68U - tp->tcp_header_len); |
984 | 983 | ||
@@ -990,7 +989,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
990 | } | 989 | } |
991 | 990 | ||
992 | /* Congestion window validation. (RFC2861) */ | 991 | /* Congestion window validation. (RFC2861) */ |
993 | |||
994 | static void tcp_cwnd_validate(struct sock *sk) | 992 | static void tcp_cwnd_validate(struct sock *sk) |
995 | { | 993 | { |
996 | struct tcp_sock *tp = tcp_sk(sk); | 994 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -1017,8 +1015,7 @@ static void tcp_cwnd_validate(struct sock *sk) | |||
1017 | * per input skb which could be mostly avoided here (if desired). | 1015 | * per input skb which could be mostly avoided here (if desired). |
1018 | */ | 1016 | */ |
1019 | static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, | 1017 | static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, |
1020 | unsigned int mss_now, | 1018 | unsigned int mss_now, unsigned int cwnd) |
1021 | unsigned int cwnd) | ||
1022 | { | 1019 | { |
1023 | struct tcp_sock *tp = tcp_sk(sk); | 1020 | struct tcp_sock *tp = tcp_sk(sk); |
1024 | u32 needed, window, cwnd_len; | 1021 | u32 needed, window, cwnd_len; |
@@ -1039,7 +1036,8 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, | |||
1039 | /* Can at least one segment of SKB be sent right now, according to the | 1036 | /* Can at least one segment of SKB be sent right now, according to the |
1040 | * congestion window rules? If so, return how many segments are allowed. | 1037 | * congestion window rules? If so, return how many segments are allowed. |
1041 | */ | 1038 | */ |
1042 | static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb) | 1039 | static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, |
1040 | struct sk_buff *skb) | ||
1043 | { | 1041 | { |
1044 | u32 in_flight, cwnd; | 1042 | u32 in_flight, cwnd; |
1045 | 1043 | ||
@@ -1059,13 +1057,12 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk | |||
1059 | /* This must be invoked the first time we consider transmitting | 1057 | /* This must be invoked the first time we consider transmitting |
1060 | * SKB onto the wire. | 1058 | * SKB onto the wire. |
1061 | */ | 1059 | */ |
1062 | static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now) | 1060 | static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, |
1061 | unsigned int mss_now) | ||
1063 | { | 1062 | { |
1064 | int tso_segs = tcp_skb_pcount(skb); | 1063 | int tso_segs = tcp_skb_pcount(skb); |
1065 | 1064 | ||
1066 | if (!tso_segs || | 1065 | if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { |
1067 | (tso_segs > 1 && | ||
1068 | tcp_skb_mss(skb) != mss_now)) { | ||
1069 | tcp_set_skb_tso_segs(sk, skb, mss_now); | 1066 | tcp_set_skb_tso_segs(sk, skb, mss_now); |
1070 | tso_segs = tcp_skb_pcount(skb); | 1067 | tso_segs = tcp_skb_pcount(skb); |
1071 | } | 1068 | } |
@@ -1085,16 +1082,13 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp) | |||
1085 | * 4. Or TCP_CORK is not set, and all sent packets are ACKed. | 1082 | * 4. Or TCP_CORK is not set, and all sent packets are ACKed. |
1086 | * With Minshall's modification: all sent small packets are ACKed. | 1083 | * With Minshall's modification: all sent small packets are ACKed. |
1087 | */ | 1084 | */ |
1088 | |||
1089 | static inline int tcp_nagle_check(const struct tcp_sock *tp, | 1085 | static inline int tcp_nagle_check(const struct tcp_sock *tp, |
1090 | const struct sk_buff *skb, | 1086 | const struct sk_buff *skb, |
1091 | unsigned mss_now, int nonagle) | 1087 | unsigned mss_now, int nonagle) |
1092 | { | 1088 | { |
1093 | return (skb->len < mss_now && | 1089 | return (skb->len < mss_now && |
1094 | ((nonagle&TCP_NAGLE_CORK) || | 1090 | ((nonagle & TCP_NAGLE_CORK) || |
1095 | (!nonagle && | 1091 | (!nonagle && tp->packets_out && tcp_minshall_check(tp)))); |
1096 | tp->packets_out && | ||
1097 | tcp_minshall_check(tp)))); | ||
1098 | } | 1092 | } |
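tcp_nagle_check(), compacted above, says a sub-MSS segment must wait when TCP_CORK is set, or when plain Nagle applies with data outstanding and Minshall's condition (a previously sent small packet is still unacked) holds. A sketch of the gate; the flag value matches the kernel's TCP_NAGLE_CORK but is redefined here for self-containment, and the last argument stands in for tcp_minshall_check():

    #include <stdio.h>

    #define TCP_NAGLE_CORK 2

    static int nagle_blocks(unsigned len, unsigned mss_now, int nonagle,
                            unsigned packets_out, int small_unacked)
    {
        return len < mss_now &&
               ((nonagle & TCP_NAGLE_CORK) ||
                (!nonagle && packets_out && small_unacked));
    }

    int main(void)
    {
        /* 100-byte write, Nagle on, data in flight, a small packet unacked. */
        printf("%d\n", nagle_blocks(100, 1460, 0, 5, 1));   /* 1: wait */
        return 0;
    }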
1099 | 1093 | ||
1100 | /* Return non-zero if the Nagle test allows this packet to be | 1094 | /* Return non-zero if the Nagle test allows this packet to be |
@@ -1126,7 +1120,8 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, | |||
1126 | } | 1120 | } |
1127 | 1121 | ||
1128 | /* Does at least the first segment of SKB fit into the send window? */ | 1122 | /* Does at least the first segment of SKB fit into the send window? */ |
1129 | static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss) | 1123 | static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, |
1124 | unsigned int cur_mss) | ||
1130 | { | 1125 | { |
1131 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 1126 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
1132 | 1127 | ||
@@ -1152,8 +1147,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, | |||
1152 | return 0; | 1147 | return 0; |
1153 | 1148 | ||
1154 | cwnd_quota = tcp_cwnd_test(tp, skb); | 1149 | cwnd_quota = tcp_cwnd_test(tp, skb); |
1155 | if (cwnd_quota && | 1150 | if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) |
1156 | !tcp_snd_wnd_test(tp, skb, cur_mss)) | ||
1157 | cwnd_quota = 0; | 1151 | cwnd_quota = 0; |
1158 | 1152 | ||
1159 | return cwnd_quota; | 1153 | return cwnd_quota; |
@@ -1177,7 +1171,8 @@ int tcp_may_send_now(struct sock *sk) | |||
1177 | * know that all the data is in scatter-gather pages, and that the | 1171 | * know that all the data is in scatter-gather pages, and that the |
1178 | * packet has never been sent out before (and thus is not cloned). | 1172 | * packet has never been sent out before (and thus is not cloned). |
1179 | */ | 1173 | */ |
1180 | static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now) | 1174 | static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, |
1175 | unsigned int mss_now) | ||
1181 | { | 1176 | { |
1182 | struct sk_buff *buff; | 1177 | struct sk_buff *buff; |
1183 | int nlen = skb->len - len; | 1178 | int nlen = skb->len - len; |
@@ -1203,7 +1198,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | |||
1203 | 1198 | ||
1204 | /* PSH and FIN should only be set in the second packet. */ | 1199 | /* PSH and FIN should only be set in the second packet. */ |
1205 | flags = TCP_SKB_CB(skb)->flags; | 1200 | flags = TCP_SKB_CB(skb)->flags; |
1206 | TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); | 1201 | TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); |
1207 | TCP_SKB_CB(buff)->flags = flags; | 1202 | TCP_SKB_CB(buff)->flags = flags; |
1208 | 1203 | ||
1209 | /* This packet was never sent out yet, so no SACK bits. */ | 1204 | /* This packet was never sent out yet, so no SACK bits. */ |
@@ -1247,8 +1242,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) | |||
1247 | 1242 | ||
1248 | in_flight = tcp_packets_in_flight(tp); | 1243 | in_flight = tcp_packets_in_flight(tp); |
1249 | 1244 | ||
1250 | BUG_ON(tcp_skb_pcount(skb) <= 1 || | 1245 | BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); |
1251 | (tp->snd_cwnd <= in_flight)); | ||
1252 | 1246 | ||
1253 | send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 1247 | send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
1254 | 1248 | ||
@@ -1281,7 +1275,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) | |||
1281 | } | 1275 | } |
1282 | 1276 | ||
1283 | /* Ok, it looks like it is advisable to defer. */ | 1277 | /* Ok, it looks like it is advisable to defer. */ |
1284 | tp->tso_deferred = 1 | (jiffies<<1); | 1278 | tp->tso_deferred = 1 | (jiffies << 1); |
1285 | 1279 | ||
1286 | return 1; | 1280 | return 1; |
1287 | 1281 | ||
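tp->tso_deferred = 1 | (jiffies << 1) packs the deferral state into a single word: bit 0 records that a defer is in progress, and the remaining bits hold the (truncated) jiffies value when it began, so clearing the field to 0 resets both at once. A sketch of the packing:

    #include <stdio.h>

    int main(void)
    {
        unsigned long jiffies = 123456;            /* assumed tick counter */
        unsigned long deferred = 1 | (jiffies << 1);
        printf("flag=%lu since=%lu\n", deferred & 1, deferred >> 1);
        return 0;
    }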
@@ -1293,7 +1287,8 @@ send_now: | |||
1293 | /* Create a new MTU probe if we are ready. | 1287 | /* Create a new MTU probe if we are ready. |
1294 | * Returns 0 if we should wait to probe (no cwnd available), | 1288 | * Returns 0 if we should wait to probe (no cwnd available), |
1295 | * 1 if a probe was sent, | 1289 | * 1 if a probe was sent, |
1296 | * -1 otherwise */ | 1290 | * -1 otherwise |
1291 | */ | ||
1297 | static int tcp_mtu_probe(struct sock *sk) | 1292 | static int tcp_mtu_probe(struct sock *sk) |
1298 | { | 1293 | { |
1299 | struct tcp_sock *tp = tcp_sk(sk); | 1294 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -1318,7 +1313,7 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1318 | 1313 | ||
1319 | /* Very simple search strategy: just double the MSS. */ | 1314 | /* Very simple search strategy: just double the MSS. */ |
1320 | mss_now = tcp_current_mss(sk, 0); | 1315 | mss_now = tcp_current_mss(sk, 0); |
1321 | probe_size = 2*tp->mss_cache; | 1316 | probe_size = 2 * tp->mss_cache; |
1322 | size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; | 1317 | size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; |
1323 | if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { | 1318 | if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { |
1324 | /* TODO: set timer for probe_converge_event */ | 1319 | /* TODO: set timer for probe_converge_event */ |
@@ -1366,7 +1361,8 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1366 | skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); | 1361 | skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); |
1367 | else | 1362 | else |
1368 | nskb->csum = skb_copy_and_csum_bits(skb, 0, | 1363 | nskb->csum = skb_copy_and_csum_bits(skb, 0, |
1369 | skb_put(nskb, copy), copy, nskb->csum); | 1364 | skb_put(nskb, copy), |
1365 | copy, nskb->csum); | ||
1370 | 1366 | ||
1371 | if (skb->len <= copy) { | 1367 | if (skb->len <= copy) { |
1372 | /* We've eaten all the data from this skb. | 1368 | /* We've eaten all the data from this skb. |
@@ -1380,7 +1376,8 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1380 | if (!skb_shinfo(skb)->nr_frags) { | 1376 | if (!skb_shinfo(skb)->nr_frags) { |
1381 | skb_pull(skb, copy); | 1377 | skb_pull(skb, copy); |
1382 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 1378 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
1383 | skb->csum = csum_partial(skb->data, skb->len, 0); | 1379 | skb->csum = csum_partial(skb->data, |
1380 | skb->len, 0); | ||
1384 | } else { | 1381 | } else { |
1385 | __pskb_trim_head(skb, copy); | 1382 | __pskb_trim_head(skb, copy); |
1386 | tcp_set_skb_tso_segs(sk, skb, mss_now); | 1383 | tcp_set_skb_tso_segs(sk, skb, mss_now); |
@@ -1400,7 +1397,7 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1400 | TCP_SKB_CB(nskb)->when = tcp_time_stamp; | 1397 | TCP_SKB_CB(nskb)->when = tcp_time_stamp; |
1401 | if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { | 1398 | if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { |
1402 | /* Decrement cwnd here because we are sending | 1399 | /* Decrement cwnd here because we are sending |
1403 | * effectively two packets. */ | 1400 | * effectively two packets. */ |
1404 | tp->snd_cwnd--; | 1401 | tp->snd_cwnd--; |
1405 | tcp_event_new_data_sent(sk, nskb); | 1402 | tcp_event_new_data_sent(sk, nskb); |
1406 | 1403 | ||
@@ -1414,7 +1411,6 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1414 | return -1; | 1411 | return -1; |
1415 | } | 1412 | } |
1416 | 1413 | ||
1417 | |||
1418 | /* This routine writes packets to the network. It advances the | 1414 | /* This routine writes packets to the network. It advances the |
1419 | * send_head. This happens as incoming acks open up the remote | 1415 | * send_head. This happens as incoming acks open up the remote |
1420 | * window for us. | 1416 | * window for us. |
@@ -1626,7 +1622,8 @@ u32 __tcp_select_window(struct sock *sk) | |||
1626 | icsk->icsk_ack.quick = 0; | 1622 | icsk->icsk_ack.quick = 0; |
1627 | 1623 | ||
1628 | if (tcp_memory_pressure) | 1624 | if (tcp_memory_pressure) |
1629 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); | 1625 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, |
1626 | 4U * tp->advmss); | ||
1630 | 1627 | ||
1631 | if (free_space < mss) | 1628 | if (free_space < mss) |
1632 | return 0; | 1629 | return 0; |
@@ -1659,7 +1656,7 @@ u32 __tcp_select_window(struct sock *sk) | |||
1659 | * is too small. | 1656 | * is too small. |
1660 | */ | 1657 | */ |
1661 | if (window <= free_space - mss || window > free_space) | 1658 | if (window <= free_space - mss || window > free_space) |
1662 | window = (free_space/mss)*mss; | 1659 | window = (free_space / mss) * mss; |
1663 | else if (mss == full_space && | 1660 | else if (mss == full_space && |
1664 | free_space > window + (full_space >> 1)) | 1661 | free_space > window + (full_space >> 1)) |
1665 | window = free_space; | 1662 | window = free_space; |
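window = (free_space / mss) * mss rounds the advertised window down to a whole number of segments, avoiding odd tail fragments that would invite sub-MSS sends. For example:

    #include <stdio.h>

    int main(void)
    {
        unsigned free_space = 10000, mss = 1460;
        printf("%u\n", (free_space / mss) * mss);   /* 8760 = 6 * 1460 */
        return 0;
    }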
@@ -1669,7 +1666,8 @@ u32 __tcp_select_window(struct sock *sk) | |||
1669 | } | 1666 | } |
1670 | 1667 | ||
1671 | /* Attempt to collapse two adjacent SKB's during retransmission. */ | 1668 | /* Attempt to collapse two adjacent SKB's during retransmission. */ |
1672 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) | 1669 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, |
1670 | int mss_now) | ||
1673 | { | 1671 | { |
1674 | struct tcp_sock *tp = tcp_sk(sk); | 1672 | struct tcp_sock *tp = tcp_sk(sk); |
1675 | struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); | 1673 | struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); |
@@ -1762,12 +1760,12 @@ void tcp_simple_retransmit(struct sock *sk) | |||
1762 | if (skb == tcp_send_head(sk)) | 1760 | if (skb == tcp_send_head(sk)) |
1763 | break; | 1761 | break; |
1764 | if (skb->len > mss && | 1762 | if (skb->len > mss && |
1765 | !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { | 1763 | !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { |
1766 | if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { | 1764 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { |
1767 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | 1765 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; |
1768 | tp->retrans_out -= tcp_skb_pcount(skb); | 1766 | tp->retrans_out -= tcp_skb_pcount(skb); |
1769 | } | 1767 | } |
1770 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) { | 1768 | if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) { |
1771 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 1769 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
1772 | tp->lost_out += tcp_skb_pcount(skb); | 1770 | tp->lost_out += tcp_skb_pcount(skb); |
1773 | lost = 1; | 1771 | lost = 1; |
@@ -1846,8 +1844,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1846 | (skb->len < (cur_mss >> 1)) && | 1844 | (skb->len < (cur_mss >> 1)) && |
1847 | (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && | 1845 | (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && |
1848 | (!tcp_skb_is_last(sk, skb)) && | 1846 | (!tcp_skb_is_last(sk, skb)) && |
1849 | (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && | 1847 | (skb_shinfo(skb)->nr_frags == 0 && |
1850 | (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && | 1848 | skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && |
1849 | (tcp_skb_pcount(skb) == 1 && | ||
1850 | tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && | ||
1851 | (sysctl_tcp_retrans_collapse != 0)) | 1851 | (sysctl_tcp_retrans_collapse != 0)) |
1852 | tcp_retrans_try_collapse(sk, skb, cur_mss); | 1852 | tcp_retrans_try_collapse(sk, skb, cur_mss); |
1853 | 1853 | ||
@@ -1885,7 +1885,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1885 | tp->total_retrans++; | 1885 | tp->total_retrans++; |
1886 | 1886 | ||
1887 | #if FASTRETRANS_DEBUG > 0 | 1887 | #if FASTRETRANS_DEBUG > 0 |
1888 | if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { | 1888 | if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { |
1889 | if (net_ratelimit()) | 1889 | if (net_ratelimit()) |
1890 | printk(KERN_DEBUG "retrans_out leaked.\n"); | 1890 | printk(KERN_DEBUG "retrans_out leaked.\n"); |
1891 | } | 1891 | } |
@@ -1927,7 +1927,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1927 | if (tp->retransmit_skb_hint) { | 1927 | if (tp->retransmit_skb_hint) { |
1928 | skb = tp->retransmit_skb_hint; | 1928 | skb = tp->retransmit_skb_hint; |
1929 | packet_cnt = tp->retransmit_cnt_hint; | 1929 | packet_cnt = tp->retransmit_cnt_hint; |
1930 | }else{ | 1930 | } else { |
1931 | skb = tcp_write_queue_head(sk); | 1931 | skb = tcp_write_queue_head(sk); |
1932 | packet_cnt = 0; | 1932 | packet_cnt = 0; |
1933 | } | 1933 | } |
@@ -1954,7 +1954,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1954 | return; | 1954 | return; |
1955 | 1955 | ||
1956 | if (sacked & TCPCB_LOST) { | 1956 | if (sacked & TCPCB_LOST) { |
1957 | if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { | 1957 | if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { |
1958 | if (tcp_retransmit_skb(sk, skb)) { | 1958 | if (tcp_retransmit_skb(sk, skb)) { |
1959 | tp->retransmit_skb_hint = NULL; | 1959 | tp->retransmit_skb_hint = NULL; |
1960 | return; | 1960 | return; |
@@ -2036,7 +2036,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2036 | } | 2036 | } |
2037 | } | 2037 | } |
2038 | 2038 | ||
2039 | |||
2040 | /* Send a fin. The caller locks the socket for us. This cannot be | 2039 | /* Send a fin. The caller locks the socket for us. This cannot be |
2041 | * allowed to fail queueing a FIN frame under any circumstances. | 2040 | * allowed to fail queueing a FIN frame under any circumstances. |
2042 | */ | 2041 | */ |
@@ -2122,14 +2121,14 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) | |||
2122 | */ | 2121 | */ |
2123 | int tcp_send_synack(struct sock *sk) | 2122 | int tcp_send_synack(struct sock *sk) |
2124 | { | 2123 | { |
2125 | struct sk_buff* skb; | 2124 | struct sk_buff *skb; |
2126 | 2125 | ||
2127 | skb = tcp_write_queue_head(sk); | 2126 | skb = tcp_write_queue_head(sk); |
2128 | if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { | 2127 | if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) { |
2129 | printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); | 2128 | printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); |
2130 | return -EFAULT; | 2129 | return -EFAULT; |
2131 | } | 2130 | } |
2132 | if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) { | 2131 | if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) { |
2133 | if (skb_cloned(skb)) { | 2132 | if (skb_cloned(skb)) { |
2134 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); | 2133 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); |
2135 | if (nskb == NULL) | 2134 | if (nskb == NULL) |
@@ -2153,8 +2152,8 @@ int tcp_send_synack(struct sock *sk) | |||
2153 | /* | 2152 | /* |
2154 | * Prepare a SYN-ACK. | 2153 | * Prepare a SYN-ACK. |
2155 | */ | 2154 | */ |
2156 | struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | 2155 | struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, |
2157 | struct request_sock *req) | 2156 | struct request_sock *req) |
2158 | { | 2157 | { |
2159 | struct inet_request_sock *ireq = inet_rsk(req); | 2158 | struct inet_request_sock *ireq = inet_rsk(req); |
2160 | struct tcp_sock *tp = tcp_sk(sk); | 2159 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -2372,9 +2371,10 @@ void tcp_send_delayed_ack(struct sock *sk) | |||
2372 | 2371 | ||
2373 | if (ato > TCP_DELACK_MIN) { | 2372 | if (ato > TCP_DELACK_MIN) { |
2374 | const struct tcp_sock *tp = tcp_sk(sk); | 2373 | const struct tcp_sock *tp = tcp_sk(sk); |
2375 | int max_ato = HZ/2; | 2374 | int max_ato = HZ / 2; |
2376 | 2375 | ||
2377 | if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) | 2376 | if (icsk->icsk_ack.pingpong || |
2377 | (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) | ||
2378 | max_ato = TCP_DELACK_MAX; | 2378 | max_ato = TCP_DELACK_MAX; |
2379 | 2379 | ||
2380 | /* Slow path, intersegment interval is "high". */ | 2380 | /* Slow path, intersegment interval is "high". */ |
@@ -2384,7 +2384,7 @@ void tcp_send_delayed_ack(struct sock *sk) | |||
2384 | * directly. | 2384 | * directly. |
2385 | */ | 2385 | */ |
2386 | if (tp->srtt) { | 2386 | if (tp->srtt) { |
2387 | int rtt = max(tp->srtt>>3, TCP_DELACK_MIN); | 2387 | int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); |
2388 | 2388 | ||
2389 | if (rtt < max_ato) | 2389 | if (rtt < max_ato) |
2390 | max_ato = rtt; | 2390 | max_ato = rtt; |
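In the delayed-ACK clamp above, tp->srtt stores the smoothed RTT left-shifted by 3, so srtt >> 3 recovers jiffies; the timeout ceiling starts at HZ/2 and shrinks to the RTT when that is smaller, but never below TCP_DELACK_MIN. A worked sketch (HZ is assumed; TCP_DELACK_MIN is HZ/25 in the kernel headers of this era):

    #include <stdio.h>

    #define HZ              1000        /* assumed tick rate */
    #define TCP_DELACK_MIN  (HZ / 25)   /* 40 ms */

    int main(void)
    {
        unsigned srtt = 100 << 3;       /* smoothed RTT, stored <<3 */
        unsigned max_ato = HZ / 2;      /* 500 ms ceiling */
        unsigned rtt = srtt >> 3;
        if (rtt < TCP_DELACK_MIN)
            rtt = TCP_DELACK_MIN;       /* rtt = max(srtt >> 3, TCP_DELACK_MIN) */
        if (rtt < max_ato)
            max_ato = rtt;
        printf("max_ato=%u jiffies\n", max_ato);   /* 100 */
        return 0;
    }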