Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  513
1 file changed, 281 insertions(+), 232 deletions(-)
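
This patch is part of the inet_connection_sock refactoring: state that used to sit directly in struct tcp_sock (the delayed-ACK block tp->ack, plus tp->rto, tp->backoff, tp->retransmits, tp->probes_out and tp->ca_state, and the congestion-control ops pointer) moves into the protocol-generic struct inet_connection_sock, and the helpers below now take a struct sock * and reach that state through inet_csk(sk). The sketch below uses made-up *_sketch names and a reduced field set purely to show the idiom; the real definitions live in include/net/inet_connection_sock.h and include/linux/tcp.h.

    #include <stdint.h>

    /* Reduced, illustrative field set; real definitions are in
     * include/net/inet_connection_sock.h and include/linux/tcp.h. */
    struct icsk_ack_sketch {
        uint8_t  pending;      /* ACK scheduled            (was tp->ack.pending)  */
        uint8_t  quick;        /* quick-ACK credits        (was tp->ack.quick)    */
        uint8_t  pingpong;     /* interactive session flag (was tp->ack.pingpong) */
        uint32_t ato;          /* delayed-ACK timeout      (was tp->ack.ato)      */
        uint16_t rcv_mss;      /* estimated peer MSS       (was tp->ack.rcv_mss)  */
    };

    struct inet_connection_sock_sketch {
        uint32_t icsk_rto;             /* was tp->rto         */
        uint8_t  icsk_ca_state;        /* was tp->ca_state    */
        uint8_t  icsk_retransmits;     /* was tp->retransmits */
        uint8_t  icsk_backoff;         /* was tp->backoff     */
        uint8_t  icsk_probes_out;      /* was tp->probes_out  */
        struct icsk_ack_sketch icsk_ack;
    };

    /* Call sites change from tp->rto, tp->ack.rcv_mss, tp->ca_state, ... to:
     *
     *     struct inet_connection_sock *icsk = inet_csk(sk);
     *     icsk->icsk_rto; icsk->icsk_ack.rcv_mss; icsk->icsk_ca_state;
     *
     * while fields that stay TCP-specific are still reached via tcp_sk(sk). */
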
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..1afb080bdf0c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -114,20 +114,21 @@ int sysctl_tcp_moderate_rcvbuf = 1;
114/* Adapt the MSS value used to make delayed ack decision to the 114/* Adapt the MSS value used to make delayed ack decision to the
115 * real world. 115 * real world.
116 */ 116 */
117static inline void tcp_measure_rcv_mss(struct tcp_sock *tp, 117static inline void tcp_measure_rcv_mss(struct sock *sk,
118 struct sk_buff *skb) 118 const struct sk_buff *skb)
119{ 119{
120 unsigned int len, lss; 120 struct inet_connection_sock *icsk = inet_csk(sk);
121 const unsigned int lss = icsk->icsk_ack.last_seg_size;
122 unsigned int len;
121 123
122 lss = tp->ack.last_seg_size; 124 icsk->icsk_ack.last_seg_size = 0;
123 tp->ack.last_seg_size = 0;
124 125
125 /* skb->len may jitter because of SACKs, even if peer 126 /* skb->len may jitter because of SACKs, even if peer
126 * sends good full-sized frames. 127 * sends good full-sized frames.
127 */ 128 */
128 len = skb->len; 129 len = skb->len;
129 if (len >= tp->ack.rcv_mss) { 130 if (len >= icsk->icsk_ack.rcv_mss) {
130 tp->ack.rcv_mss = len; 131 icsk->icsk_ack.rcv_mss = len;
131 } else { 132 } else {
132 /* Otherwise, we make more careful check taking into account, 133 /* Otherwise, we make more careful check taking into account,
133 * that SACKs block is variable. 134 * that SACKs block is variable.
@@ -147,41 +148,44 @@ static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
147 * tcp header plus fixed timestamp option length. 148 * tcp header plus fixed timestamp option length.
148 * Resulting "len" is MSS free of SACK jitter. 149 * Resulting "len" is MSS free of SACK jitter.
149 */ 150 */
150 len -= tp->tcp_header_len; 151 len -= tcp_sk(sk)->tcp_header_len;
151 tp->ack.last_seg_size = len; 152 icsk->icsk_ack.last_seg_size = len;
152 if (len == lss) { 153 if (len == lss) {
153 tp->ack.rcv_mss = len; 154 icsk->icsk_ack.rcv_mss = len;
154 return; 155 return;
155 } 156 }
156 } 157 }
157 tp->ack.pending |= TCP_ACK_PUSHED; 158 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
158 } 159 }
159} 160}
160 161
161static void tcp_incr_quickack(struct tcp_sock *tp) 162static void tcp_incr_quickack(struct sock *sk)
162{ 163{
163 unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss); 164 struct inet_connection_sock *icsk = inet_csk(sk);
165 unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
164 166
165 if (quickacks==0) 167 if (quickacks==0)
166 quickacks=2; 168 quickacks=2;
167 if (quickacks > tp->ack.quick) 169 if (quickacks > icsk->icsk_ack.quick)
168 tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 170 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
169} 171}
170 172
171void tcp_enter_quickack_mode(struct tcp_sock *tp) 173void tcp_enter_quickack_mode(struct sock *sk)
172{ 174{
173 tcp_incr_quickack(tp); 175 struct inet_connection_sock *icsk = inet_csk(sk);
174 tp->ack.pingpong = 0; 176 tcp_incr_quickack(sk);
175 tp->ack.ato = TCP_ATO_MIN; 177 icsk->icsk_ack.pingpong = 0;
178 icsk->icsk_ack.ato = TCP_ATO_MIN;
176} 179}
177 180
178/* Send ACKs quickly, if "quick" count is not exhausted 181/* Send ACKs quickly, if "quick" count is not exhausted
179 * and the session is not interactive. 182 * and the session is not interactive.
180 */ 183 */
181 184
182static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) 185static inline int tcp_in_quickack_mode(const struct sock *sk)
183{ 186{
184 return (tp->ack.quick && !tp->ack.pingpong); 187 const struct inet_connection_sock *icsk = inet_csk(sk);
188 return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
185} 189}
186 190
187/* Buffer size and advertised window tuning. 191/* Buffer size and advertised window tuning.
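
The quick-ACK helpers above size the burst of immediate ACKs from the advertised window: rcv_wnd / (2 * rcv_mss) ACK credits, with a floor of 2 and a cap of TCP_MAX_QUICKACKS, and pingpong = 0 marks the session as non-interactive so the credits are actually used. A standalone sketch of that arithmetic (the cap value here is illustrative; the real constant is defined in include/net/tcp.h):

    #include <stdio.h>

    #define MAX_QUICKACKS 16    /* illustrative stand-in for TCP_MAX_QUICKACKS */

    static unsigned int quickack_credits(unsigned int rcv_wnd, unsigned int rcv_mss)
    {
        unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

        if (quickacks == 0)
            quickacks = 2;
        if (quickacks > MAX_QUICKACKS)
            quickacks = MAX_QUICKACKS;
        return quickacks;
    }

    int main(void)
    {
        /* A 64 KB window with a 1460-byte MSS yields 22 credits, capped to 16. */
        printf("%u\n", quickack_credits(65535, 1460));
        return 0;
    }
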
@@ -224,8 +228,8 @@ static void tcp_fixup_sndbuf(struct sock *sk)
224 */ 228 */
225 229
226/* Slow part of check#2. */ 230/* Slow part of check#2. */
227static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, 231static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
228 struct sk_buff *skb) 232 const struct sk_buff *skb)
229{ 233{
230 /* Optimize this! */ 234 /* Optimize this! */
231 int truesize = tcp_win_from_space(skb->truesize)/2; 235 int truesize = tcp_win_from_space(skb->truesize)/2;
@@ -233,7 +237,7 @@ static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
233 237
234 while (tp->rcv_ssthresh <= window) { 238 while (tp->rcv_ssthresh <= window) {
235 if (truesize <= skb->len) 239 if (truesize <= skb->len)
236 return 2*tp->ack.rcv_mss; 240 return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
237 241
238 truesize >>= 1; 242 truesize >>= 1;
239 window >>= 1; 243 window >>= 1;
@@ -260,7 +264,7 @@ static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
260 264
261 if (incr) { 265 if (incr) {
262 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); 266 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
263 tp->ack.quick |= 1; 267 inet_csk(sk)->icsk_ack.quick |= 1;
264 } 268 }
265 } 269 }
266} 270}
@@ -321,11 +325,12 @@ static void tcp_init_buffer_space(struct sock *sk)
321/* 5. Recalculate window clamp after socket hit its memory bounds. */ 325/* 5. Recalculate window clamp after socket hit its memory bounds. */
322static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) 326static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
323{ 327{
328 struct inet_connection_sock *icsk = inet_csk(sk);
324 struct sk_buff *skb; 329 struct sk_buff *skb;
325 unsigned int app_win = tp->rcv_nxt - tp->copied_seq; 330 unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
326 int ofo_win = 0; 331 int ofo_win = 0;
327 332
328 tp->ack.quick = 0; 333 icsk->icsk_ack.quick = 0;
329 334
330 skb_queue_walk(&tp->out_of_order_queue, skb) { 335 skb_queue_walk(&tp->out_of_order_queue, skb) {
331 ofo_win += skb->len; 336 ofo_win += skb->len;
@@ -346,8 +351,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
346 app_win += ofo_win; 351 app_win += ofo_win;
347 if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf) 352 if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
348 app_win >>= 1; 353 app_win >>= 1;
349 if (app_win > tp->ack.rcv_mss) 354 if (app_win > icsk->icsk_ack.rcv_mss)
350 app_win -= tp->ack.rcv_mss; 355 app_win -= icsk->icsk_ack.rcv_mss;
351 app_win = max(app_win, 2U*tp->advmss); 356 app_win = max(app_win, 2U*tp->advmss);
352 357
353 if (!ofo_win) 358 if (!ofo_win)
@@ -415,11 +420,12 @@ new_measure:
415 tp->rcv_rtt_est.time = tcp_time_stamp; 420 tp->rcv_rtt_est.time = tcp_time_stamp;
416} 421}
417 422
418static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) 423static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
419{ 424{
425 struct tcp_sock *tp = tcp_sk(sk);
420 if (tp->rx_opt.rcv_tsecr && 426 if (tp->rx_opt.rcv_tsecr &&
421 (TCP_SKB_CB(skb)->end_seq - 427 (TCP_SKB_CB(skb)->end_seq -
422 TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss)) 428 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
423 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); 429 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
424} 430}
425 431
@@ -492,41 +498,42 @@ new_measure:
492 */ 498 */
493static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) 499static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
494{ 500{
501 struct inet_connection_sock *icsk = inet_csk(sk);
495 u32 now; 502 u32 now;
496 503
497 tcp_schedule_ack(tp); 504 inet_csk_schedule_ack(sk);
498 505
499 tcp_measure_rcv_mss(tp, skb); 506 tcp_measure_rcv_mss(sk, skb);
500 507
501 tcp_rcv_rtt_measure(tp); 508 tcp_rcv_rtt_measure(tp);
502 509
503 now = tcp_time_stamp; 510 now = tcp_time_stamp;
504 511
505 if (!tp->ack.ato) { 512 if (!icsk->icsk_ack.ato) {
506 /* The _first_ data packet received, initialize 513 /* The _first_ data packet received, initialize
507 * delayed ACK engine. 514 * delayed ACK engine.
508 */ 515 */
509 tcp_incr_quickack(tp); 516 tcp_incr_quickack(sk);
510 tp->ack.ato = TCP_ATO_MIN; 517 icsk->icsk_ack.ato = TCP_ATO_MIN;
511 } else { 518 } else {
512 int m = now - tp->ack.lrcvtime; 519 int m = now - icsk->icsk_ack.lrcvtime;
513 520
514 if (m <= TCP_ATO_MIN/2) { 521 if (m <= TCP_ATO_MIN/2) {
515 /* The fastest case is the first. */ 522 /* The fastest case is the first. */
516 tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2; 523 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
517 } else if (m < tp->ack.ato) { 524 } else if (m < icsk->icsk_ack.ato) {
518 tp->ack.ato = (tp->ack.ato>>1) + m; 525 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
519 if (tp->ack.ato > tp->rto) 526 if (icsk->icsk_ack.ato > icsk->icsk_rto)
520 tp->ack.ato = tp->rto; 527 icsk->icsk_ack.ato = icsk->icsk_rto;
521 } else if (m > tp->rto) { 528 } else if (m > icsk->icsk_rto) {
522 /* Too long gap. Apparently sender falled to 529 /* Too long gap. Apparently sender falled to
523 * restart window, so that we send ACKs quickly. 530 * restart window, so that we send ACKs quickly.
524 */ 531 */
525 tcp_incr_quickack(tp); 532 tcp_incr_quickack(sk);
526 sk_stream_mem_reclaim(sk); 533 sk_stream_mem_reclaim(sk);
527 } 534 }
528 } 535 }
529 tp->ack.lrcvtime = now; 536 icsk->icsk_ack.lrcvtime = now;
530 537
531 TCP_ECN_check_ce(tp, skb); 538 TCP_ECN_check_ce(tp, skb);
532 539
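
The else branch above keeps icsk_ack.ato, the delayed-ACK timeout, as a crude moving average of the packet inter-arrival time m: back-to-back arrivals pull it toward TCP_ATO_MIN, moderately spaced arrivals blend m in, bounded by the RTO, and a gap larger than the RTO re-enters quick-ACK mode instead. A minimal user-space rendition of the update rule (parameter names and units are illustrative; the kernel works in jiffies):

    /* ato update as in tcp_event_data_recv(); m is the observed inter-arrival gap. */
    static unsigned int update_ato(unsigned int ato, unsigned int m,
                                   unsigned int ato_min, unsigned int rto)
    {
        if (m <= ato_min / 2) {
            /* Back-to-back data: decay quickly toward the minimum. */
            ato = (ato >> 1) + ato_min / 2;
        } else if (m < ato) {
            /* Blend the observed gap into the average, never above the RTO. */
            ato = (ato >> 1) + m;
            if (ato > rto)
                ato = rto;
        }
        /* m > rto is handled separately: the sender apparently restarted its
         * window, so the real code switches back to quick ACKs instead. */
        return ato;
    }

With steady arrivals faster than ato_min/2 the average converges to ato_min, so a streaming flow never waits longer than the minimum delayed-ACK timeout.
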
@@ -543,8 +550,10 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
543 * To save cycles in the RFC 1323 implementation it was better to break 550 * To save cycles in the RFC 1323 implementation it was better to break
544 * it up into three procedures. -- erics 551 * it up into three procedures. -- erics
545 */ 552 */
546static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt) 553static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
547{ 554{
555 struct tcp_sock *tp = tcp_sk(sk);
556 const struct inet_connection_sock *icsk = inet_csk(sk);
548 long m = mrtt; /* RTT */ 557 long m = mrtt; /* RTT */
549 558
550 /* The following amusing code comes from Jacobson's 559 /* The following amusing code comes from Jacobson's
@@ -604,15 +613,16 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
604 tp->rtt_seq = tp->snd_nxt; 613 tp->rtt_seq = tp->snd_nxt;
605 } 614 }
606 615
607 if (tp->ca_ops->rtt_sample) 616 if (icsk->icsk_ca_ops->rtt_sample)
608 tp->ca_ops->rtt_sample(tp, *usrtt); 617 icsk->icsk_ca_ops->rtt_sample(sk, *usrtt);
609} 618}
610 619
611/* Calculate rto without backoff. This is the second half of Van Jacobson's 620/* Calculate rto without backoff. This is the second half of Van Jacobson's
612 * routine referred to above. 621 * routine referred to above.
613 */ 622 */
614static inline void tcp_set_rto(struct tcp_sock *tp) 623static inline void tcp_set_rto(struct sock *sk)
615{ 624{
625 const struct tcp_sock *tp = tcp_sk(sk);
616 /* Old crap is replaced with new one. 8) 626 /* Old crap is replaced with new one. 8)
617 * 627 *
618 * More seriously: 628 * More seriously:
@@ -623,7 +633,7 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
623 * is invisible. Actually, Linux-2.4 also generates erratic 633 * is invisible. Actually, Linux-2.4 also generates erratic
624 * ACKs in some curcumstances. 634 * ACKs in some curcumstances.
625 */ 635 */
626 tp->rto = (tp->srtt >> 3) + tp->rttvar; 636 inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
627 637
628 /* 2. Fixups made earlier cannot be right. 638 /* 2. Fixups made earlier cannot be right.
629 * If we do not estimate RTO correctly without them, 639 * If we do not estimate RTO correctly without them,
@@ -635,10 +645,10 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
635/* NOTE: clamping at TCP_RTO_MIN is not required, current algo 645/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
636 * guarantees that rto is higher. 646 * guarantees that rto is higher.
637 */ 647 */
638static inline void tcp_bound_rto(struct tcp_sock *tp) 648static inline void tcp_bound_rto(struct sock *sk)
639{ 649{
640 if (tp->rto > TCP_RTO_MAX) 650 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
641 tp->rto = TCP_RTO_MAX; 651 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
642} 652}
643 653
644/* Save metrics learned by this TCP session. 654/* Save metrics learned by this TCP session.
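
tcp_rtt_estimator(), tcp_set_rto() and tcp_bound_rto() in the hunks above are the Jacobson/Karels estimator, now reached through the socket: srtt is kept scaled by 8, the deviation term scaled by 4, and the RTO is srtt/8 plus the deviation term, clamped at TCP_RTO_MAX. The program below reproduces the arithmetic in user space; it deliberately skips the kernel's mdev/mdev_max damping, and the 120 s cap is an illustrative stand-in for TCP_RTO_MAX:

    #include <stdio.h>

    #define RTO_MAX_MS 120000    /* illustrative stand-in for TCP_RTO_MAX */

    struct rtt_est {
        long srtt;      /* smoothed RTT, scaled by 8 */
        long rttvar;    /* deviation term, scaled by 4 */
    };

    static long rtt_sample(struct rtt_est *e, long m)    /* m: measured RTT in ms */
    {
        long rto;

        if (e->srtt == 0) {
            e->srtt = m << 3;        /* first sample seeds the estimator ...   */
            e->rttvar = m << 1;      /* ... so that the initial RTO is 3 * RTT */
        } else {
            long err = m - (e->srtt >> 3);
            e->srtt += err;                          /* unscaled: srtt += (m - srtt) / 8      */
            if (err < 0)
                err = -err;
            e->rttvar += err - (e->rttvar >> 2);     /* unscaled: rttvar += (|err| - rttvar)/4 */
        }
        rto = (e->srtt >> 3) + e->rttvar;            /* tcp_set_rto()   */
        if (rto > RTO_MAX_MS)                        /* tcp_bound_rto() */
            rto = RTO_MAX_MS;
        return rto;
    }

    int main(void)
    {
        struct rtt_est e = { 0, 0 };
        long samples[] = { 100, 120, 90, 300, 110 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("rtt=%ldms rto=%ldms\n", samples[i], rtt_sample(&e, samples[i]));
        return 0;
    }
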
@@ -656,9 +666,10 @@ void tcp_update_metrics(struct sock *sk)
656 dst_confirm(dst); 666 dst_confirm(dst);
657 667
658 if (dst && (dst->flags&DST_HOST)) { 668 if (dst && (dst->flags&DST_HOST)) {
669 const struct inet_connection_sock *icsk = inet_csk(sk);
659 int m; 670 int m;
660 671
661 if (tp->backoff || !tp->srtt) { 672 if (icsk->icsk_backoff || !tp->srtt) {
662 /* This session failed to estimate rtt. Why? 673 /* This session failed to estimate rtt. Why?
663 * Probably, no packets returned in time. 674 * Probably, no packets returned in time.
664 * Reset our results. 675 * Reset our results.
@@ -707,7 +718,7 @@ void tcp_update_metrics(struct sock *sk)
707 tp->snd_cwnd > dst_metric(dst, RTAX_CWND)) 718 tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
708 dst->metrics[RTAX_CWND-1] = tp->snd_cwnd; 719 dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
709 } else if (tp->snd_cwnd > tp->snd_ssthresh && 720 } else if (tp->snd_cwnd > tp->snd_ssthresh &&
710 tp->ca_state == TCP_CA_Open) { 721 icsk->icsk_ca_state == TCP_CA_Open) {
711 /* Cong. avoidance phase, cwnd is reliable. */ 722 /* Cong. avoidance phase, cwnd is reliable. */
712 if (!dst_metric_locked(dst, RTAX_SSTHRESH)) 723 if (!dst_metric_locked(dst, RTAX_SSTHRESH))
713 dst->metrics[RTAX_SSTHRESH-1] = 724 dst->metrics[RTAX_SSTHRESH-1] =
@@ -801,9 +812,9 @@ static void tcp_init_metrics(struct sock *sk)
801 tp->mdev = dst_metric(dst, RTAX_RTTVAR); 812 tp->mdev = dst_metric(dst, RTAX_RTTVAR);
802 tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); 813 tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
803 } 814 }
804 tcp_set_rto(tp); 815 tcp_set_rto(sk);
805 tcp_bound_rto(tp); 816 tcp_bound_rto(sk);
806 if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) 817 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
807 goto reset; 818 goto reset;
808 tp->snd_cwnd = tcp_init_cwnd(tp, dst); 819 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
809 tp->snd_cwnd_stamp = tcp_time_stamp; 820 tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -817,12 +828,14 @@ reset:
817 if (!tp->rx_opt.saw_tstamp && tp->srtt) { 828 if (!tp->rx_opt.saw_tstamp && tp->srtt) {
818 tp->srtt = 0; 829 tp->srtt = 0;
819 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; 830 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
820 tp->rto = TCP_TIMEOUT_INIT; 831 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
821 } 832 }
822} 833}
823 834
824static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts) 835static void tcp_update_reordering(struct sock *sk, const int metric,
836 const int ts)
825{ 837{
838 struct tcp_sock *tp = tcp_sk(sk);
826 if (metric > tp->reordering) { 839 if (metric > tp->reordering) {
827 tp->reordering = min(TCP_MAX_REORDERING, metric); 840 tp->reordering = min(TCP_MAX_REORDERING, metric);
828 841
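
tcp_update_reordering() keeps a one-way estimate: the reordering degree only grows toward the largest displacement seen, capped at TCP_MAX_REORDERING, and later replaces the fixed dup-ACK threshold. Reduced to its core (the cap value here is illustrative):

    #define MAX_REORDERING 127    /* illustrative stand-in for TCP_MAX_REORDERING */

    static void update_reordering(unsigned int *reordering, unsigned int metric)
    {
        if (metric > *reordering)
            *reordering = metric < MAX_REORDERING ? metric : MAX_REORDERING;
    }
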
@@ -837,7 +850,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
837 NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); 850 NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
838#if FASTRETRANS_DEBUG > 1 851#if FASTRETRANS_DEBUG > 1
839 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", 852 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
840 tp->rx_opt.sack_ok, tp->ca_state, 853 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
841 tp->reordering, 854 tp->reordering,
842 tp->fackets_out, 855 tp->fackets_out,
843 tp->sacked_out, 856 tp->sacked_out,
@@ -899,6 +912,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
899static int 912static int
900tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una) 913tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
901{ 914{
915 const struct inet_connection_sock *icsk = inet_csk(sk);
902 struct tcp_sock *tp = tcp_sk(sk); 916 struct tcp_sock *tp = tcp_sk(sk);
903 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; 917 unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
904 struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2); 918 struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
@@ -1064,7 +1078,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1064 * we have to account for reordering! Ugly, 1078 * we have to account for reordering! Ugly,
1065 * but should help. 1079 * but should help.
1066 */ 1080 */
1067 if (lost_retrans && tp->ca_state == TCP_CA_Recovery) { 1081 if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
1068 struct sk_buff *skb; 1082 struct sk_buff *skb;
1069 1083
1070 sk_stream_for_retrans_queue(skb, sk) { 1084 sk_stream_for_retrans_queue(skb, sk) {
@@ -1093,8 +1107,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1093 1107
1094 tp->left_out = tp->sacked_out + tp->lost_out; 1108 tp->left_out = tp->sacked_out + tp->lost_out;
1095 1109
1096 if ((reord < tp->fackets_out) && tp->ca_state != TCP_CA_Loss) 1110 if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
1097 tcp_update_reordering(tp, ((tp->fackets_out + 1) - reord), 0); 1111 tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
1098 1112
1099#if FASTRETRANS_DEBUG > 0 1113#if FASTRETRANS_DEBUG > 0
1100 BUG_TRAP((int)tp->sacked_out >= 0); 1114 BUG_TRAP((int)tp->sacked_out >= 0);
@@ -1111,17 +1125,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1111 */ 1125 */
1112void tcp_enter_frto(struct sock *sk) 1126void tcp_enter_frto(struct sock *sk)
1113{ 1127{
1128 const struct inet_connection_sock *icsk = inet_csk(sk);
1114 struct tcp_sock *tp = tcp_sk(sk); 1129 struct tcp_sock *tp = tcp_sk(sk);
1115 struct sk_buff *skb; 1130 struct sk_buff *skb;
1116 1131
1117 tp->frto_counter = 1; 1132 tp->frto_counter = 1;
1118 1133
1119 if (tp->ca_state <= TCP_CA_Disorder || 1134 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1120 tp->snd_una == tp->high_seq || 1135 tp->snd_una == tp->high_seq ||
1121 (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { 1136 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1122 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1137 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1123 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1138 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1124 tcp_ca_event(tp, CA_EVENT_FRTO); 1139 tcp_ca_event(sk, CA_EVENT_FRTO);
1125 } 1140 }
1126 1141
1127 /* Have to clear retransmission markers here to keep the bookkeeping 1142 /* Have to clear retransmission markers here to keep the bookkeeping
@@ -1138,7 +1153,7 @@ void tcp_enter_frto(struct sock *sk)
1138 } 1153 }
1139 tcp_sync_left_out(tp); 1154 tcp_sync_left_out(tp);
1140 1155
1141 tcp_set_ca_state(tp, TCP_CA_Open); 1156 tcp_set_ca_state(sk, TCP_CA_Open);
1142 tp->frto_highmark = tp->snd_nxt; 1157 tp->frto_highmark = tp->snd_nxt;
1143} 1158}
1144 1159
@@ -1184,7 +1199,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
1184 1199
1185 tp->reordering = min_t(unsigned int, tp->reordering, 1200 tp->reordering = min_t(unsigned int, tp->reordering,
1186 sysctl_tcp_reordering); 1201 sysctl_tcp_reordering);
1187 tcp_set_ca_state(tp, TCP_CA_Loss); 1202 tcp_set_ca_state(sk, TCP_CA_Loss);
1188 tp->high_seq = tp->frto_highmark; 1203 tp->high_seq = tp->frto_highmark;
1189 TCP_ECN_queue_cwr(tp); 1204 TCP_ECN_queue_cwr(tp);
1190} 1205}
@@ -1208,16 +1223,17 @@ void tcp_clear_retrans(struct tcp_sock *tp)
1208 */ 1223 */
1209void tcp_enter_loss(struct sock *sk, int how) 1224void tcp_enter_loss(struct sock *sk, int how)
1210{ 1225{
1226 const struct inet_connection_sock *icsk = inet_csk(sk);
1211 struct tcp_sock *tp = tcp_sk(sk); 1227 struct tcp_sock *tp = tcp_sk(sk);
1212 struct sk_buff *skb; 1228 struct sk_buff *skb;
1213 int cnt = 0; 1229 int cnt = 0;
1214 1230
1215 /* Reduce ssthresh if it has not yet been made inside this window. */ 1231 /* Reduce ssthresh if it has not yet been made inside this window. */
1216 if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 1232 if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
1217 (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { 1233 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1218 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1234 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1219 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1235 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1220 tcp_ca_event(tp, CA_EVENT_LOSS); 1236 tcp_ca_event(sk, CA_EVENT_LOSS);
1221 } 1237 }
1222 tp->snd_cwnd = 1; 1238 tp->snd_cwnd = 1;
1223 tp->snd_cwnd_cnt = 0; 1239 tp->snd_cwnd_cnt = 0;
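
tcp_enter_loss() is the retransmission-timeout entry into loss recovery: unless ssthresh was already reduced in this window it snapshots the current value for a possible undo, asks the congestion module for a new ssthresh, and collapses cwnd to a single segment before the scoreboard is rebuilt. Schematically (guard conditions and SACK bookkeeping omitted; the Reno-style halving merely stands in for icsk_ca_ops->ssthresh(sk), and the struct is invented for illustration):

    struct cc_state {
        unsigned int snd_cwnd;
        unsigned int snd_ssthresh;
        unsigned int prior_ssthresh;    /* saved so a spurious timeout can be undone */
    };

    static void enter_loss_sketch(struct cc_state *s)
    {
        unsigned int half = s->snd_cwnd / 2;

        s->prior_ssthresh = s->snd_ssthresh;      /* tcp_current_ssthresh(sk) in the kernel */
        s->snd_ssthresh = half > 2 ? half : 2;    /* Reno-style stand-in for ->ssthresh()   */
        s->snd_cwnd = 1;                          /* restart with one segment in flight     */
    }
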
@@ -1248,12 +1264,12 @@ void tcp_enter_loss(struct sock *sk, int how)
1248 1264
1249 tp->reordering = min_t(unsigned int, tp->reordering, 1265 tp->reordering = min_t(unsigned int, tp->reordering,
1250 sysctl_tcp_reordering); 1266 sysctl_tcp_reordering);
1251 tcp_set_ca_state(tp, TCP_CA_Loss); 1267 tcp_set_ca_state(sk, TCP_CA_Loss);
1252 tp->high_seq = tp->snd_nxt; 1268 tp->high_seq = tp->snd_nxt;
1253 TCP_ECN_queue_cwr(tp); 1269 TCP_ECN_queue_cwr(tp);
1254} 1270}
1255 1271
1256static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) 1272static int tcp_check_sack_reneging(struct sock *sk)
1257{ 1273{
1258 struct sk_buff *skb; 1274 struct sk_buff *skb;
1259 1275
@@ -1265,12 +1281,14 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp)
1265 */ 1281 */
1266 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL && 1282 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
1267 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1283 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1284 struct inet_connection_sock *icsk = inet_csk(sk);
1268 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); 1285 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
1269 1286
1270 tcp_enter_loss(sk, 1); 1287 tcp_enter_loss(sk, 1);
1271 tp->retransmits++; 1288 icsk->icsk_retransmits++;
1272 tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); 1289 tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
1273 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1290 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1291 icsk->icsk_rto, TCP_RTO_MAX);
1274 return 1; 1292 return 1;
1275 } 1293 }
1276 return 0; 1294 return 0;
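
The reneging path above also shows the reworked timer API: tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, rto) becomes inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX), with the upper bound passed explicitly instead of being implied by the TCP-private helper. The fragment below only illustrates that clamp-then-arm idea; the struct and function are invented names, not the kernel's implementation:

    struct xmit_timer_sketch {
        int pending;               /* which event is armed (retransmit, probe, ...) */
        unsigned long timeout;     /* absolute expiry, in jiffies */
    };

    static void reset_xmit_timer_sketch(struct xmit_timer_sketch *t, int what,
                                        unsigned long now, unsigned long when,
                                        unsigned long max_when)
    {
        if (when > max_when)
            when = max_when;       /* the extra argument added by this patch */
        t->pending = what;
        t->timeout = now + when;
    }
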
@@ -1281,15 +1299,15 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
1281 return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; 1299 return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
1282} 1300}
1283 1301
1284static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb) 1302static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
1285{ 1303{
1286 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto); 1304 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
1287} 1305}
1288 1306
1289static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) 1307static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
1290{ 1308{
1291 return tp->packets_out && 1309 return tp->packets_out &&
1292 tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue)); 1310 tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
1293} 1311}
1294 1312
1295/* Linux NewReno/SACK/FACK/ECN state machine. 1313/* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1423,8 +1441,9 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
1423 * in assumption of absent reordering, interpret this as reordering. 1441 * in assumption of absent reordering, interpret this as reordering.
1424 * The only another reason could be bug in receiver TCP. 1442 * The only another reason could be bug in receiver TCP.
1425 */ 1443 */
1426static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend) 1444static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1427{ 1445{
1446 struct tcp_sock *tp = tcp_sk(sk);
1428 u32 holes; 1447 u32 holes;
1429 1448
1430 holes = max(tp->lost_out, 1U); 1449 holes = max(tp->lost_out, 1U);
@@ -1432,16 +1451,17 @@ static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend)
1432 1451
1433 if ((tp->sacked_out + holes) > tp->packets_out) { 1452 if ((tp->sacked_out + holes) > tp->packets_out) {
1434 tp->sacked_out = tp->packets_out - holes; 1453 tp->sacked_out = tp->packets_out - holes;
1435 tcp_update_reordering(tp, tp->packets_out+addend, 0); 1454 tcp_update_reordering(sk, tp->packets_out + addend, 0);
1436 } 1455 }
1437} 1456}
1438 1457
1439/* Emulate SACKs for SACKless connection: account for a new dupack. */ 1458/* Emulate SACKs for SACKless connection: account for a new dupack. */
1440 1459
1441static void tcp_add_reno_sack(struct tcp_sock *tp) 1460static void tcp_add_reno_sack(struct sock *sk)
1442{ 1461{
1462 struct tcp_sock *tp = tcp_sk(sk);
1443 tp->sacked_out++; 1463 tp->sacked_out++;
1444 tcp_check_reno_reordering(tp, 0); 1464 tcp_check_reno_reordering(sk, 0);
1445 tcp_sync_left_out(tp); 1465 tcp_sync_left_out(tp);
1446} 1466}
1447 1467
@@ -1456,7 +1476,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acke
1456 else 1476 else
1457 tp->sacked_out -= acked-1; 1477 tp->sacked_out -= acked-1;
1458 } 1478 }
1459 tcp_check_reno_reordering(tp, acked); 1479 tcp_check_reno_reordering(sk, acked);
1460 tcp_sync_left_out(tp); 1480 tcp_sync_left_out(tp);
1461} 1481}
1462 1482
@@ -1509,7 +1529,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1509 struct sk_buff *skb; 1529 struct sk_buff *skb;
1510 1530
1511 sk_stream_for_retrans_queue(skb, sk) { 1531 sk_stream_for_retrans_queue(skb, sk) {
1512 if (tcp_skb_timedout(tp, skb) && 1532 if (tcp_skb_timedout(sk, skb) &&
1513 !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 1533 !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1514 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1534 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1515 tp->lost_out += tcp_skb_pcount(skb); 1535 tp->lost_out += tcp_skb_pcount(skb);
@@ -1530,14 +1550,16 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
1530} 1550}
1531 1551
1532/* Decrease cwnd each second ack. */ 1552/* Decrease cwnd each second ack. */
1533static void tcp_cwnd_down(struct tcp_sock *tp) 1553static void tcp_cwnd_down(struct sock *sk)
1534{ 1554{
1555 const struct inet_connection_sock *icsk = inet_csk(sk);
1556 struct tcp_sock *tp = tcp_sk(sk);
1535 int decr = tp->snd_cwnd_cnt + 1; 1557 int decr = tp->snd_cwnd_cnt + 1;
1536 1558
1537 tp->snd_cwnd_cnt = decr&1; 1559 tp->snd_cwnd_cnt = decr&1;
1538 decr >>= 1; 1560 decr >>= 1;
1539 1561
1540 if (decr && tp->snd_cwnd > tp->ca_ops->min_cwnd(tp)) 1562 if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
1541 tp->snd_cwnd -= decr; 1563 tp->snd_cwnd -= decr;
1542 1564
1543 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); 1565 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
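
tcp_cwnd_down() performs rate halving while in CWR or Recovery: snd_cwnd_cnt counts ACKs, every second ACK takes one segment off cwnd as long as cwnd stays above the congestion module's minimum, and cwnd is then pinned to at most packets-in-flight plus one. The same arithmetic in isolation (min_cwnd is a placeholder for icsk_ca_ops->min_cwnd(sk), and the cwnd_stamp update is omitted):

    static void cwnd_down_sketch(unsigned int *snd_cwnd, unsigned int *snd_cwnd_cnt,
                                 unsigned int in_flight, unsigned int min_cwnd)
    {
        unsigned int decr = *snd_cwnd_cnt + 1;

        *snd_cwnd_cnt = decr & 1;       /* keep the odd ACK for the next round */
        decr >>= 1;                     /* one decrement per two ACKs */

        if (decr && *snd_cwnd > min_cwnd)
            *snd_cwnd -= decr;

        if (*snd_cwnd > in_flight + 1)  /* never hold the window far above flight size */
            *snd_cwnd = in_flight + 1;
    }
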
@@ -1571,11 +1593,15 @@ static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
1571#define DBGUNDO(x...) do { } while (0) 1593#define DBGUNDO(x...) do { } while (0)
1572#endif 1594#endif
1573 1595
1574static void tcp_undo_cwr(struct tcp_sock *tp, int undo) 1596static void tcp_undo_cwr(struct sock *sk, const int undo)
1575{ 1597{
1598 struct tcp_sock *tp = tcp_sk(sk);
1599
1576 if (tp->prior_ssthresh) { 1600 if (tp->prior_ssthresh) {
1577 if (tp->ca_ops->undo_cwnd) 1601 const struct inet_connection_sock *icsk = inet_csk(sk);
1578 tp->snd_cwnd = tp->ca_ops->undo_cwnd(tp); 1602
1603 if (icsk->icsk_ca_ops->undo_cwnd)
1604 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
1579 else 1605 else
1580 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1); 1606 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
1581 1607
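
tcp_undo_cwr() rolls back a window reduction once it turns out to have been unnecessary: the congestion module's undo_cwnd() hook wins if present, otherwise cwnd is restored to at least twice the slow-start threshold, and a full undo also puts ssthresh back from prior_ssthresh. Sketch of the default branch (invented function name, simplified arguments):

    static void undo_cwr_sketch(unsigned int *snd_cwnd, unsigned int *snd_ssthresh,
                                unsigned int prior_ssthresh, int full_undo)
    {
        if (!prior_ssthresh)
            return;                             /* nothing was saved: cannot undo */

        /* Default policy when the CC module has no undo_cwnd() hook. */
        if (*snd_cwnd < (*snd_ssthresh << 1))
            *snd_cwnd = *snd_ssthresh << 1;

        /* A full undo also restores the slow-start threshold. */
        if (full_undo && prior_ssthresh > *snd_ssthresh)
            *snd_ssthresh = prior_ssthresh;
    }
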
@@ -1603,9 +1629,9 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
1603 /* Happy end! We did not retransmit anything 1629 /* Happy end! We did not retransmit anything
1604 * or our original transmission succeeded. 1630 * or our original transmission succeeded.
1605 */ 1631 */
1606 DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans"); 1632 DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
1607 tcp_undo_cwr(tp, 1); 1633 tcp_undo_cwr(sk, 1);
1608 if (tp->ca_state == TCP_CA_Loss) 1634 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
1609 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 1635 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1610 else 1636 else
1611 NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); 1637 NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
@@ -1618,7 +1644,7 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
1618 tcp_moderate_cwnd(tp); 1644 tcp_moderate_cwnd(tp);
1619 return 1; 1645 return 1;
1620 } 1646 }
1621 tcp_set_ca_state(tp, TCP_CA_Open); 1647 tcp_set_ca_state(sk, TCP_CA_Open);
1622 return 0; 1648 return 0;
1623} 1649}
1624 1650
@@ -1627,7 +1653,7 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
1627{ 1653{
1628 if (tp->undo_marker && !tp->undo_retrans) { 1654 if (tp->undo_marker && !tp->undo_retrans) {
1629 DBGUNDO(sk, tp, "D-SACK"); 1655 DBGUNDO(sk, tp, "D-SACK");
1630 tcp_undo_cwr(tp, 1); 1656 tcp_undo_cwr(sk, 1);
1631 tp->undo_marker = 0; 1657 tp->undo_marker = 0;
1632 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); 1658 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
1633 } 1659 }
@@ -1648,10 +1674,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
1648 if (tp->retrans_out == 0) 1674 if (tp->retrans_out == 0)
1649 tp->retrans_stamp = 0; 1675 tp->retrans_stamp = 0;
1650 1676
1651 tcp_update_reordering(tp, tcp_fackets_out(tp)+acked, 1); 1677 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
1652 1678
1653 DBGUNDO(sk, tp, "Hoe"); 1679 DBGUNDO(sk, tp, "Hoe");
1654 tcp_undo_cwr(tp, 0); 1680 tcp_undo_cwr(sk, 0);
1655 NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); 1681 NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
1656 1682
1657 /* So... Do not make Hoe's retransmit yet. 1683 /* So... Do not make Hoe's retransmit yet.
@@ -1674,22 +1700,23 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
1674 DBGUNDO(sk, tp, "partial loss"); 1700 DBGUNDO(sk, tp, "partial loss");
1675 tp->lost_out = 0; 1701 tp->lost_out = 0;
1676 tp->left_out = tp->sacked_out; 1702 tp->left_out = tp->sacked_out;
1677 tcp_undo_cwr(tp, 1); 1703 tcp_undo_cwr(sk, 1);
1678 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 1704 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
1679 tp->retransmits = 0; 1705 inet_csk(sk)->icsk_retransmits = 0;
1680 tp->undo_marker = 0; 1706 tp->undo_marker = 0;
1681 if (!IsReno(tp)) 1707 if (!IsReno(tp))
1682 tcp_set_ca_state(tp, TCP_CA_Open); 1708 tcp_set_ca_state(sk, TCP_CA_Open);
1683 return 1; 1709 return 1;
1684 } 1710 }
1685 return 0; 1711 return 0;
1686} 1712}
1687 1713
1688static inline void tcp_complete_cwr(struct tcp_sock *tp) 1714static inline void tcp_complete_cwr(struct sock *sk)
1689{ 1715{
1716 struct tcp_sock *tp = tcp_sk(sk);
1690 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 1717 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
1691 tp->snd_cwnd_stamp = tcp_time_stamp; 1718 tp->snd_cwnd_stamp = tcp_time_stamp;
1692 tcp_ca_event(tp, CA_EVENT_COMPLETE_CWR); 1719 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
1693} 1720}
1694 1721
1695static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) 1722static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
@@ -1700,21 +1727,21 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
1700 tp->retrans_stamp = 0; 1727 tp->retrans_stamp = 0;
1701 1728
1702 if (flag&FLAG_ECE) 1729 if (flag&FLAG_ECE)
1703 tcp_enter_cwr(tp); 1730 tcp_enter_cwr(sk);
1704 1731
1705 if (tp->ca_state != TCP_CA_CWR) { 1732 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
1706 int state = TCP_CA_Open; 1733 int state = TCP_CA_Open;
1707 1734
1708 if (tp->left_out || tp->retrans_out || tp->undo_marker) 1735 if (tp->left_out || tp->retrans_out || tp->undo_marker)
1709 state = TCP_CA_Disorder; 1736 state = TCP_CA_Disorder;
1710 1737
1711 if (tp->ca_state != state) { 1738 if (inet_csk(sk)->icsk_ca_state != state) {
1712 tcp_set_ca_state(tp, state); 1739 tcp_set_ca_state(sk, state);
1713 tp->high_seq = tp->snd_nxt; 1740 tp->high_seq = tp->snd_nxt;
1714 } 1741 }
1715 tcp_moderate_cwnd(tp); 1742 tcp_moderate_cwnd(tp);
1716 } else { 1743 } else {
1717 tcp_cwnd_down(tp); 1744 tcp_cwnd_down(sk);
1718 } 1745 }
1719} 1746}
1720 1747
@@ -1733,6 +1760,7 @@ static void
1733tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, 1760tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1734 int prior_packets, int flag) 1761 int prior_packets, int flag)
1735{ 1762{
1763 struct inet_connection_sock *icsk = inet_csk(sk);
1736 struct tcp_sock *tp = tcp_sk(sk); 1764 struct tcp_sock *tp = tcp_sk(sk);
1737 int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP)); 1765 int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
1738 1766
@@ -1750,13 +1778,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1750 tp->prior_ssthresh = 0; 1778 tp->prior_ssthresh = 0;
1751 1779
1752 /* B. In all the states check for reneging SACKs. */ 1780 /* B. In all the states check for reneging SACKs. */
1753 if (tp->sacked_out && tcp_check_sack_reneging(sk, tp)) 1781 if (tp->sacked_out && tcp_check_sack_reneging(sk))
1754 return; 1782 return;
1755 1783
1756 /* C. Process data loss notification, provided it is valid. */ 1784 /* C. Process data loss notification, provided it is valid. */
1757 if ((flag&FLAG_DATA_LOST) && 1785 if ((flag&FLAG_DATA_LOST) &&
1758 before(tp->snd_una, tp->high_seq) && 1786 before(tp->snd_una, tp->high_seq) &&
1759 tp->ca_state != TCP_CA_Open && 1787 icsk->icsk_ca_state != TCP_CA_Open &&
1760 tp->fackets_out > tp->reordering) { 1788 tp->fackets_out > tp->reordering) {
1761 tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq); 1789 tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
1762 NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); 1790 NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
@@ -1767,14 +1795,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1767 1795
1768 /* E. Check state exit conditions. State can be terminated 1796 /* E. Check state exit conditions. State can be terminated
1769 * when high_seq is ACKed. */ 1797 * when high_seq is ACKed. */
1770 if (tp->ca_state == TCP_CA_Open) { 1798 if (icsk->icsk_ca_state == TCP_CA_Open) {
1771 if (!sysctl_tcp_frto) 1799 if (!sysctl_tcp_frto)
1772 BUG_TRAP(tp->retrans_out == 0); 1800 BUG_TRAP(tp->retrans_out == 0);
1773 tp->retrans_stamp = 0; 1801 tp->retrans_stamp = 0;
1774 } else if (!before(tp->snd_una, tp->high_seq)) { 1802 } else if (!before(tp->snd_una, tp->high_seq)) {
1775 switch (tp->ca_state) { 1803 switch (icsk->icsk_ca_state) {
1776 case TCP_CA_Loss: 1804 case TCP_CA_Loss:
1777 tp->retransmits = 0; 1805 icsk->icsk_retransmits = 0;
1778 if (tcp_try_undo_recovery(sk, tp)) 1806 if (tcp_try_undo_recovery(sk, tp))
1779 return; 1807 return;
1780 break; 1808 break;
@@ -1783,8 +1811,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1783 /* CWR is to be held something *above* high_seq 1811 /* CWR is to be held something *above* high_seq
1784 * is ACKed for CWR bit to reach receiver. */ 1812 * is ACKed for CWR bit to reach receiver. */
1785 if (tp->snd_una != tp->high_seq) { 1813 if (tp->snd_una != tp->high_seq) {
1786 tcp_complete_cwr(tp); 1814 tcp_complete_cwr(sk);
1787 tcp_set_ca_state(tp, TCP_CA_Open); 1815 tcp_set_ca_state(sk, TCP_CA_Open);
1788 } 1816 }
1789 break; 1817 break;
1790 1818
@@ -1795,7 +1823,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1795 * catching for all duplicate ACKs. */ 1823 * catching for all duplicate ACKs. */
1796 IsReno(tp) || tp->snd_una != tp->high_seq) { 1824 IsReno(tp) || tp->snd_una != tp->high_seq) {
1797 tp->undo_marker = 0; 1825 tp->undo_marker = 0;
1798 tcp_set_ca_state(tp, TCP_CA_Open); 1826 tcp_set_ca_state(sk, TCP_CA_Open);
1799 } 1827 }
1800 break; 1828 break;
1801 1829
@@ -1804,17 +1832,17 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1804 tcp_reset_reno_sack(tp); 1832 tcp_reset_reno_sack(tp);
1805 if (tcp_try_undo_recovery(sk, tp)) 1833 if (tcp_try_undo_recovery(sk, tp))
1806 return; 1834 return;
1807 tcp_complete_cwr(tp); 1835 tcp_complete_cwr(sk);
1808 break; 1836 break;
1809 } 1837 }
1810 } 1838 }
1811 1839
1812 /* F. Process state. */ 1840 /* F. Process state. */
1813 switch (tp->ca_state) { 1841 switch (icsk->icsk_ca_state) {
1814 case TCP_CA_Recovery: 1842 case TCP_CA_Recovery:
1815 if (prior_snd_una == tp->snd_una) { 1843 if (prior_snd_una == tp->snd_una) {
1816 if (IsReno(tp) && is_dupack) 1844 if (IsReno(tp) && is_dupack)
1817 tcp_add_reno_sack(tp); 1845 tcp_add_reno_sack(sk);
1818 } else { 1846 } else {
1819 int acked = prior_packets - tp->packets_out; 1847 int acked = prior_packets - tp->packets_out;
1820 if (IsReno(tp)) 1848 if (IsReno(tp))
@@ -1824,13 +1852,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1824 break; 1852 break;
1825 case TCP_CA_Loss: 1853 case TCP_CA_Loss:
1826 if (flag&FLAG_DATA_ACKED) 1854 if (flag&FLAG_DATA_ACKED)
1827 tp->retransmits = 0; 1855 icsk->icsk_retransmits = 0;
1828 if (!tcp_try_undo_loss(sk, tp)) { 1856 if (!tcp_try_undo_loss(sk, tp)) {
1829 tcp_moderate_cwnd(tp); 1857 tcp_moderate_cwnd(tp);
1830 tcp_xmit_retransmit_queue(sk); 1858 tcp_xmit_retransmit_queue(sk);
1831 return; 1859 return;
1832 } 1860 }
1833 if (tp->ca_state != TCP_CA_Open) 1861 if (icsk->icsk_ca_state != TCP_CA_Open)
1834 return; 1862 return;
1835 /* Loss is undone; fall through to processing in Open state. */ 1863 /* Loss is undone; fall through to processing in Open state. */
1836 default: 1864 default:
@@ -1838,10 +1866,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1838 if (tp->snd_una != prior_snd_una) 1866 if (tp->snd_una != prior_snd_una)
1839 tcp_reset_reno_sack(tp); 1867 tcp_reset_reno_sack(tp);
1840 if (is_dupack) 1868 if (is_dupack)
1841 tcp_add_reno_sack(tp); 1869 tcp_add_reno_sack(sk);
1842 } 1870 }
1843 1871
1844 if (tp->ca_state == TCP_CA_Disorder) 1872 if (icsk->icsk_ca_state == TCP_CA_Disorder)
1845 tcp_try_undo_dsack(sk, tp); 1873 tcp_try_undo_dsack(sk, tp);
1846 1874
1847 if (!tcp_time_to_recover(sk, tp)) { 1875 if (!tcp_time_to_recover(sk, tp)) {
@@ -1861,30 +1889,28 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1861 tp->undo_marker = tp->snd_una; 1889 tp->undo_marker = tp->snd_una;
1862 tp->undo_retrans = tp->retrans_out; 1890 tp->undo_retrans = tp->retrans_out;
1863 1891
1864 if (tp->ca_state < TCP_CA_CWR) { 1892 if (icsk->icsk_ca_state < TCP_CA_CWR) {
1865 if (!(flag&FLAG_ECE)) 1893 if (!(flag&FLAG_ECE))
1866 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1894 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1867 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1895 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1868 TCP_ECN_queue_cwr(tp); 1896 TCP_ECN_queue_cwr(tp);
1869 } 1897 }
1870 1898
1871 tp->snd_cwnd_cnt = 0; 1899 tp->snd_cwnd_cnt = 0;
1872 tcp_set_ca_state(tp, TCP_CA_Recovery); 1900 tcp_set_ca_state(sk, TCP_CA_Recovery);
1873 } 1901 }
1874 1902
1875 if (is_dupack || tcp_head_timedout(sk, tp)) 1903 if (is_dupack || tcp_head_timedout(sk, tp))
1876 tcp_update_scoreboard(sk, tp); 1904 tcp_update_scoreboard(sk, tp);
1877 tcp_cwnd_down(tp); 1905 tcp_cwnd_down(sk);
1878 tcp_xmit_retransmit_queue(sk); 1906 tcp_xmit_retransmit_queue(sk);
1879} 1907}
1880 1908
1881/* Read draft-ietf-tcplw-high-performance before mucking 1909/* Read draft-ietf-tcplw-high-performance before mucking
1882 * with this code. (Superceeds RFC1323) 1910 * with this code. (Superceeds RFC1323)
1883 */ 1911 */
1884static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) 1912static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
1885{ 1913{
1886 __u32 seq_rtt;
1887
1888 /* RTTM Rule: A TSecr value received in a segment is used to 1914 /* RTTM Rule: A TSecr value received in a segment is used to
1889 * update the averaged RTT measurement only if the segment 1915 * update the averaged RTT measurement only if the segment
1890 * acknowledges some new data, i.e., only if it advances the 1916 * acknowledges some new data, i.e., only if it advances the
@@ -1900,14 +1926,15 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag)
1900 * answer arrives rto becomes 120 seconds! If at least one of segments 1926 * answer arrives rto becomes 120 seconds! If at least one of segments
1901 * in window is lost... Voila. --ANK (010210) 1927 * in window is lost... Voila. --ANK (010210)
1902 */ 1928 */
1903 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 1929 struct tcp_sock *tp = tcp_sk(sk);
1904 tcp_rtt_estimator(tp, seq_rtt, usrtt); 1930 const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
1905 tcp_set_rto(tp); 1931 tcp_rtt_estimator(sk, seq_rtt, usrtt);
1906 tp->backoff = 0; 1932 tcp_set_rto(sk);
1907 tcp_bound_rto(tp); 1933 inet_csk(sk)->icsk_backoff = 0;
1934 tcp_bound_rto(sk);
1908} 1935}
1909 1936
1910static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) 1937static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
1911{ 1938{
1912 /* We don't have a timestamp. Can only use 1939 /* We don't have a timestamp. Can only use
1913 * packets that are not retransmitted to determine 1940 * packets that are not retransmitted to determine
@@ -1921,27 +1948,29 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int
1921 if (flag & FLAG_RETRANS_DATA_ACKED) 1948 if (flag & FLAG_RETRANS_DATA_ACKED)
1922 return; 1949 return;
1923 1950
1924 tcp_rtt_estimator(tp, seq_rtt, usrtt); 1951 tcp_rtt_estimator(sk, seq_rtt, usrtt);
1925 tcp_set_rto(tp); 1952 tcp_set_rto(sk);
1926 tp->backoff = 0; 1953 inet_csk(sk)->icsk_backoff = 0;
1927 tcp_bound_rto(tp); 1954 tcp_bound_rto(sk);
1928} 1955}
1929 1956
1930static inline void tcp_ack_update_rtt(struct tcp_sock *tp, 1957static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
1931 int flag, s32 seq_rtt, u32 *usrtt) 1958 const s32 seq_rtt, u32 *usrtt)
1932{ 1959{
1960 const struct tcp_sock *tp = tcp_sk(sk);
1933 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 1961 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
1934 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 1962 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
1935 tcp_ack_saw_tstamp(tp, usrtt, flag); 1963 tcp_ack_saw_tstamp(sk, usrtt, flag);
1936 else if (seq_rtt >= 0) 1964 else if (seq_rtt >= 0)
1937 tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); 1965 tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
1938} 1966}
1939 1967
1940static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 1968static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
1941 u32 in_flight, int good) 1969 u32 in_flight, int good)
1942{ 1970{
1943 tp->ca_ops->cong_avoid(tp, ack, rtt, in_flight, good); 1971 const struct inet_connection_sock *icsk = inet_csk(sk);
1944 tp->snd_cwnd_stamp = tcp_time_stamp; 1972 icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
1973 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
1945} 1974}
1946 1975
1947/* Restart timer after forward progress on connection. 1976/* Restart timer after forward progress on connection.
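
The two RTT paths above differ only in where the sample comes from: with timestamps it is the current clock minus the echoed TSecr and may be taken on any ACK of new data, while without timestamps Karn's rule applies and ACKs covering retransmitted data are discarded. Either way the sample feeds tcp_rtt_estimator(), the backoff is cleared and the RTO rebuilt. A condensed sketch of the dispatch (the flag value below is a stand-in, not the kernel's FLAG_RETRANS_DATA_ACKED bit):

    #define RETRANS_DATA_ACKED 0x02    /* stand-in for FLAG_RETRANS_DATA_ACKED */

    /* Returns an RTT sample in clock ticks, or -1 when none can be taken. */
    static long ack_rtt_sample(int saw_tstamp, unsigned long now, unsigned long tsecr,
                               long seq_rtt, int flags)
    {
        if (saw_tstamp && tsecr)
            return (long)(now - tsecr);    /* timestamp echo, wrap-safe subtraction */
        if (flags & RETRANS_DATA_ACKED)
            return -1;                     /* Karn's rule: ambiguous, skip the sample */
        return seq_rtt;                    /* timed from the ACKed segment itself */
    }
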
@@ -1951,9 +1980,9 @@ static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
1951static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) 1980static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
1952{ 1981{
1953 if (!tp->packets_out) { 1982 if (!tp->packets_out) {
1954 tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS); 1983 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
1955 } else { 1984 } else {
1956 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1985 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
1957 } 1986 }
1958} 1987}
1959 1988
@@ -2068,9 +2097,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2068 seq_rtt = -1; 2097 seq_rtt = -1;
2069 } else if (seq_rtt < 0) 2098 } else if (seq_rtt < 0)
2070 seq_rtt = now - scb->when; 2099 seq_rtt = now - scb->when;
2071 if (seq_usrtt) 2100 if (seq_usrtt) {
2072 *seq_usrtt = (usnow.tv_sec - skb->stamp.tv_sec) * 1000000 2101 struct timeval tv;
2073 + (usnow.tv_usec - skb->stamp.tv_usec); 2102
2103 skb_get_timestamp(skb, &tv);
2104 *seq_usrtt = (usnow.tv_sec - tv.tv_sec) * 1000000
2105 + (usnow.tv_usec - tv.tv_usec);
2106 }
2074 2107
2075 if (sacked & TCPCB_SACKED_ACKED) 2108 if (sacked & TCPCB_SACKED_ACKED)
2076 tp->sacked_out -= tcp_skb_pcount(skb); 2109 tp->sacked_out -= tcp_skb_pcount(skb);
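
The hunk above stops poking skb->stamp directly and goes through skb_get_timestamp() when producing the microsecond RTT sample requested by congestion modules with an rtt_sample() hook; the arithmetic itself is a plain timeval difference:

    #include <sys/time.h>

    /* Microsecond RTT from two timevals, as in the *seq_usrtt computation above. */
    static long usec_rtt(const struct timeval *now, const struct timeval *sent)
    {
        return (now->tv_sec - sent->tv_sec) * 1000000L
             + (now->tv_usec - sent->tv_usec);
    }
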
@@ -2085,16 +2118,17 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2085 seq_rtt = now - scb->when; 2118 seq_rtt = now - scb->when;
2086 tcp_dec_pcount_approx(&tp->fackets_out, skb); 2119 tcp_dec_pcount_approx(&tp->fackets_out, skb);
2087 tcp_packets_out_dec(tp, skb); 2120 tcp_packets_out_dec(tp, skb);
2088 __skb_unlink(skb, skb->list); 2121 __skb_unlink(skb, &sk->sk_write_queue);
2089 sk_stream_free_skb(sk, skb); 2122 sk_stream_free_skb(sk, skb);
2090 } 2123 }
2091 2124
2092 if (acked&FLAG_ACKED) { 2125 if (acked&FLAG_ACKED) {
2093 tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); 2126 const struct inet_connection_sock *icsk = inet_csk(sk);
2127 tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
2094 tcp_ack_packets_out(sk, tp); 2128 tcp_ack_packets_out(sk, tp);
2095 2129
2096 if (tp->ca_ops->pkts_acked) 2130 if (icsk->icsk_ca_ops->pkts_acked)
2097 tp->ca_ops->pkts_acked(tp, pkts_acked); 2131 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
2098 } 2132 }
2099 2133
2100#if FASTRETRANS_DEBUG > 0 2134#if FASTRETRANS_DEBUG > 0
@@ -2102,19 +2136,20 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2102 BUG_TRAP((int)tp->lost_out >= 0); 2136 BUG_TRAP((int)tp->lost_out >= 0);
2103 BUG_TRAP((int)tp->retrans_out >= 0); 2137 BUG_TRAP((int)tp->retrans_out >= 0);
2104 if (!tp->packets_out && tp->rx_opt.sack_ok) { 2138 if (!tp->packets_out && tp->rx_opt.sack_ok) {
2139 const struct inet_connection_sock *icsk = inet_csk(sk);
2105 if (tp->lost_out) { 2140 if (tp->lost_out) {
2106 printk(KERN_DEBUG "Leak l=%u %d\n", 2141 printk(KERN_DEBUG "Leak l=%u %d\n",
2107 tp->lost_out, tp->ca_state); 2142 tp->lost_out, icsk->icsk_ca_state);
2108 tp->lost_out = 0; 2143 tp->lost_out = 0;
2109 } 2144 }
2110 if (tp->sacked_out) { 2145 if (tp->sacked_out) {
2111 printk(KERN_DEBUG "Leak s=%u %d\n", 2146 printk(KERN_DEBUG "Leak s=%u %d\n",
2112 tp->sacked_out, tp->ca_state); 2147 tp->sacked_out, icsk->icsk_ca_state);
2113 tp->sacked_out = 0; 2148 tp->sacked_out = 0;
2114 } 2149 }
2115 if (tp->retrans_out) { 2150 if (tp->retrans_out) {
2116 printk(KERN_DEBUG "Leak r=%u %d\n", 2151 printk(KERN_DEBUG "Leak r=%u %d\n",
2117 tp->retrans_out, tp->ca_state); 2152 tp->retrans_out, icsk->icsk_ca_state);
2118 tp->retrans_out = 0; 2153 tp->retrans_out = 0;
2119 } 2154 }
2120 } 2155 }
@@ -2125,40 +2160,43 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2125 2160
2126static void tcp_ack_probe(struct sock *sk) 2161static void tcp_ack_probe(struct sock *sk)
2127{ 2162{
2128 struct tcp_sock *tp = tcp_sk(sk); 2163 const struct tcp_sock *tp = tcp_sk(sk);
2164 struct inet_connection_sock *icsk = inet_csk(sk);
2129 2165
2130 /* Was it a usable window open? */ 2166 /* Was it a usable window open? */
2131 2167
2132 if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, 2168 if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
2133 tp->snd_una + tp->snd_wnd)) { 2169 tp->snd_una + tp->snd_wnd)) {
2134 tp->backoff = 0; 2170 icsk->icsk_backoff = 0;
2135 tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0); 2171 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
2136 /* Socket must be waked up by subsequent tcp_data_snd_check(). 2172 /* Socket must be waked up by subsequent tcp_data_snd_check().
2137 * This function is not for random using! 2173 * This function is not for random using!
2138 */ 2174 */
2139 } else { 2175 } else {
2140 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, 2176 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2141 min(tp->rto << tp->backoff, TCP_RTO_MAX)); 2177 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2178 TCP_RTO_MAX);
2142 } 2179 }
2143} 2180}
2144 2181
2145static inline int tcp_ack_is_dubious(struct tcp_sock *tp, int flag) 2182static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
2146{ 2183{
2147 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 2184 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
2148 tp->ca_state != TCP_CA_Open); 2185 inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
2149} 2186}
2150 2187
2151static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag) 2188static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
2152{ 2189{
2190 const struct tcp_sock *tp = tcp_sk(sk);
2153 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 2191 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
2154 !((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR)); 2192 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
2155} 2193}
2156 2194
2157/* Check that window update is acceptable. 2195/* Check that window update is acceptable.
2158 * The function assumes that snd_una<=ack<=snd_next. 2196 * The function assumes that snd_una<=ack<=snd_next.
2159 */ 2197 */
2160static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack, 2198static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
2161 u32 ack_seq, u32 nwin) 2199 const u32 ack_seq, const u32 nwin)
2162{ 2200{
2163 return (after(ack, tp->snd_una) || 2201 return (after(ack, tp->snd_una) ||
2164 after(ack_seq, tp->snd_wl1) || 2202 after(ack_seq, tp->snd_wl1) ||
@@ -2241,6 +2279,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2241/* This routine deals with incoming acks, but not outgoing ones. */ 2279/* This routine deals with incoming acks, but not outgoing ones. */
2242static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 2280static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2243{ 2281{
2282 struct inet_connection_sock *icsk = inet_csk(sk);
2244 struct tcp_sock *tp = tcp_sk(sk); 2283 struct tcp_sock *tp = tcp_sk(sk);
2245 u32 prior_snd_una = tp->snd_una; 2284 u32 prior_snd_una = tp->snd_una;
2246 u32 ack_seq = TCP_SKB_CB(skb)->seq; 2285 u32 ack_seq = TCP_SKB_CB(skb)->seq;
@@ -2268,7 +2307,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2268 tp->snd_una = ack; 2307 tp->snd_una = ack;
2269 flag |= FLAG_WIN_UPDATE; 2308 flag |= FLAG_WIN_UPDATE;
2270 2309
2271 tcp_ca_event(tp, CA_EVENT_FAST_ACK); 2310 tcp_ca_event(sk, CA_EVENT_FAST_ACK);
2272 2311
2273 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); 2312 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
2274 } else { 2313 } else {
@@ -2285,7 +2324,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2285 if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) 2324 if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
2286 flag |= FLAG_ECE; 2325 flag |= FLAG_ECE;
2287 2326
2288 tcp_ca_event(tp, CA_EVENT_SLOW_ACK); 2327 tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
2289 } 2328 }
2290 2329
2291 /* We passed data and got it acked, remove any soft error 2330 /* We passed data and got it acked, remove any soft error
@@ -2301,19 +2340,19 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2301 2340
2302 /* See if we can take anything off of the retransmit queue. */ 2341 /* See if we can take anything off of the retransmit queue. */
2303 flag |= tcp_clean_rtx_queue(sk, &seq_rtt, 2342 flag |= tcp_clean_rtx_queue(sk, &seq_rtt,
2304 tp->ca_ops->rtt_sample ? &seq_usrtt : NULL); 2343 icsk->icsk_ca_ops->rtt_sample ? &seq_usrtt : NULL);
2305 2344
2306 if (tp->frto_counter) 2345 if (tp->frto_counter)
2307 tcp_process_frto(sk, prior_snd_una); 2346 tcp_process_frto(sk, prior_snd_una);
2308 2347
2309 if (tcp_ack_is_dubious(tp, flag)) { 2348 if (tcp_ack_is_dubious(sk, flag)) {
2310 /* Advanve CWND, if state allows this. */ 2349 /* Advanve CWND, if state allows this. */
2311 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(tp, flag)) 2350 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
2312 tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 0); 2351 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
2313 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); 2352 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
2314 } else { 2353 } else {
2315 if ((flag & FLAG_DATA_ACKED)) 2354 if ((flag & FLAG_DATA_ACKED))
2316 tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 1); 2355 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
2317 } 2356 }
2318 2357
2319 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) 2358 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
@@ -2322,7 +2361,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2322 return 1; 2361 return 1;
2323 2362
2324no_queue: 2363no_queue:
2325 tp->probes_out = 0; 2364 icsk->icsk_probes_out = 0;
2326 2365
2327 /* If this ack opens up a zero window, clear backoff. It was 2366 /* If this ack opens up a zero window, clear backoff. It was
2328 * being used to time the probes, and is probably far higher than 2367 * being used to time the probes, and is probably far higher than
@@ -2500,8 +2539,9 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
2500 * up to bandwidth of 18Gigabit/sec. 8) ] 2539 * up to bandwidth of 18Gigabit/sec. 8) ]
2501 */ 2540 */
2502 2541
2503static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) 2542static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
2504{ 2543{
2544 struct tcp_sock *tp = tcp_sk(sk);
2505 struct tcphdr *th = skb->h.th; 2545 struct tcphdr *th = skb->h.th;
2506 u32 seq = TCP_SKB_CB(skb)->seq; 2546 u32 seq = TCP_SKB_CB(skb)->seq;
2507 u32 ack = TCP_SKB_CB(skb)->ack_seq; 2547 u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -2516,14 +2556,15 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
2516 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 2556 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
2517 2557
2518 /* 4. ... and sits in replay window. */ 2558 /* 4. ... and sits in replay window. */
2519 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ); 2559 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
2520} 2560}
2521 2561
2522static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) 2562static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
2523{ 2563{
2564 const struct tcp_sock *tp = tcp_sk(sk);
2524 return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && 2565 return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
2525 xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && 2566 xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
2526 !tcp_disordered_ack(tp, skb)); 2567 !tcp_disordered_ack(sk, skb));
2527} 2568}
2528 2569
2529/* Check segment sequence number for validity. 2570/* Check segment sequence number for validity.
@@ -2586,7 +2627,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
2586{ 2627{
2587 struct tcp_sock *tp = tcp_sk(sk); 2628 struct tcp_sock *tp = tcp_sk(sk);
2588 2629
2589 tcp_schedule_ack(tp); 2630 inet_csk_schedule_ack(sk);
2590 2631
2591 sk->sk_shutdown |= RCV_SHUTDOWN; 2632 sk->sk_shutdown |= RCV_SHUTDOWN;
2592 sock_set_flag(sk, SOCK_DONE); 2633 sock_set_flag(sk, SOCK_DONE);
@@ -2596,7 +2637,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
2596 case TCP_ESTABLISHED: 2637 case TCP_ESTABLISHED:
2597 /* Move to CLOSE_WAIT */ 2638 /* Move to CLOSE_WAIT */
2598 tcp_set_state(sk, TCP_CLOSE_WAIT); 2639 tcp_set_state(sk, TCP_CLOSE_WAIT);
2599 tp->ack.pingpong = 1; 2640 inet_csk(sk)->icsk_ack.pingpong = 1;
2600 break; 2641 break;
2601 2642
2602 case TCP_CLOSE_WAIT: 2643 case TCP_CLOSE_WAIT:
@@ -2694,7 +2735,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
2694 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 2735 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
2695 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 2736 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
2696 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 2737 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
2697 tcp_enter_quickack_mode(tp); 2738 tcp_enter_quickack_mode(sk);
2698 2739
2699 if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { 2740 if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
2700 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 2741 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -2853,7 +2894,7 @@ static void tcp_ofo_queue(struct sock *sk)
2853 2894
2854 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 2895 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
2855 SOCK_DEBUG(sk, "ofo packet was already received \n"); 2896 SOCK_DEBUG(sk, "ofo packet was already received \n");
2856 __skb_unlink(skb, skb->list); 2897 __skb_unlink(skb, &tp->out_of_order_queue);
2857 __kfree_skb(skb); 2898 __kfree_skb(skb);
2858 continue; 2899 continue;
2859 } 2900 }
@@ -2861,7 +2902,7 @@ static void tcp_ofo_queue(struct sock *sk)
2861 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 2902 tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
2862 TCP_SKB_CB(skb)->end_seq); 2903 TCP_SKB_CB(skb)->end_seq);
2863 2904
2864 __skb_unlink(skb, skb->list); 2905 __skb_unlink(skb, &tp->out_of_order_queue);
2865 __skb_queue_tail(&sk->sk_receive_queue, skb); 2906 __skb_queue_tail(&sk->sk_receive_queue, skb);
2866 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 2907 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
2867 if(skb->h.th->fin) 2908 if(skb->h.th->fin)
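Editorial note (not part of the patch): with the skb->list back-pointer gone in this series, the unlink above has to name the queue explicitly. A minimal sketch of the two-argument __skb_unlink(); it assumes the caller already holds the queue lock, and the helper name is illustrative only.

#include <linux/skbuff.h>

static void drop_first(struct sk_buff_head *queue)
{
	struct sk_buff *skb = skb_peek(queue);

	if (skb) {
		/* previously: __skb_unlink(skb, skb->list) */
		__skb_unlink(skb, queue);
		__kfree_skb(skb);
	}
}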
@@ -2942,7 +2983,7 @@ queue_and_out:
2942 * gap in queue is filled. 2983 * gap in queue is filled.
2943 */ 2984 */
2944 if (skb_queue_empty(&tp->out_of_order_queue)) 2985 if (skb_queue_empty(&tp->out_of_order_queue))
2945 tp->ack.pingpong = 0; 2986 inet_csk(sk)->icsk_ack.pingpong = 0;
2946 } 2987 }
2947 2988
2948 if (tp->rx_opt.num_sacks) 2989 if (tp->rx_opt.num_sacks)
@@ -2963,8 +3004,8 @@ queue_and_out:
2963 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 3004 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
2964 3005
2965out_of_window: 3006out_of_window:
2966 tcp_enter_quickack_mode(tp); 3007 tcp_enter_quickack_mode(sk);
2967 tcp_schedule_ack(tp); 3008 inet_csk_schedule_ack(sk);
2968drop: 3009drop:
2969 __kfree_skb(skb); 3010 __kfree_skb(skb);
2970 return; 3011 return;
@@ -2974,7 +3015,7 @@ drop:
2974 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 3015 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
2975 goto out_of_window; 3016 goto out_of_window;
2976 3017
2977 tcp_enter_quickack_mode(tp); 3018 tcp_enter_quickack_mode(sk);
2978 3019
2979 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 3020 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
2980 /* Partial packet, seq < rcv_next < end_seq */ 3021 /* Partial packet, seq < rcv_next < end_seq */
@@ -3003,7 +3044,7 @@ drop:
3003 3044
3004 /* Disable header prediction. */ 3045 /* Disable header prediction. */
3005 tp->pred_flags = 0; 3046 tp->pred_flags = 0;
3006 tcp_schedule_ack(tp); 3047 inet_csk_schedule_ack(sk);
3007 3048
3008 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 3049 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
3009 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 3050 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -3027,7 +3068,7 @@ drop:
3027 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 3068 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3028 3069
3029 if (seq == TCP_SKB_CB(skb1)->end_seq) { 3070 if (seq == TCP_SKB_CB(skb1)->end_seq) {
3030 __skb_append(skb1, skb); 3071 __skb_append(skb1, skb, &tp->out_of_order_queue);
3031 3072
3032 if (!tp->rx_opt.num_sacks || 3073 if (!tp->rx_opt.num_sacks ||
3033 tp->selective_acks[0].end_seq != seq) 3074 tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3112,7 @@ drop:
3071 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq); 3112 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
3072 break; 3113 break;
3073 } 3114 }
3074 __skb_unlink(skb1, skb1->list); 3115 __skb_unlink(skb1, &tp->out_of_order_queue);
3075 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); 3116 tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
3076 __kfree_skb(skb1); 3117 __kfree_skb(skb1);
3077 } 3118 }
@@ -3088,8 +3129,9 @@ add_sack:
3088 * simplifies code) 3129 * simplifies code)
3089 */ 3130 */
3090static void 3131static void
3091tcp_collapse(struct sock *sk, struct sk_buff *head, 3132tcp_collapse(struct sock *sk, struct sk_buff_head *list,
3092 struct sk_buff *tail, u32 start, u32 end) 3133 struct sk_buff *head, struct sk_buff *tail,
3134 u32 start, u32 end)
3093{ 3135{
3094 struct sk_buff *skb; 3136 struct sk_buff *skb;
3095 3137
@@ -3099,7 +3141,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3099 /* No new bits? It is possible on ofo queue. */ 3141 /* No new bits? It is possible on ofo queue. */
3100 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 3142 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3101 struct sk_buff *next = skb->next; 3143 struct sk_buff *next = skb->next;
3102 __skb_unlink(skb, skb->list); 3144 __skb_unlink(skb, list);
3103 __kfree_skb(skb); 3145 __kfree_skb(skb);
3104 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 3146 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3105 skb = next; 3147 skb = next;
@@ -3145,7 +3187,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3145 nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head); 3187 nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
3146 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 3188 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
3147 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 3189 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
3148 __skb_insert(nskb, skb->prev, skb, skb->list); 3190 __skb_insert(nskb, skb->prev, skb, list);
3149 sk_stream_set_owner_r(nskb, sk); 3191 sk_stream_set_owner_r(nskb, sk);
3150 3192
3151 /* Copy data, releasing collapsed skbs. */ 3193 /* Copy data, releasing collapsed skbs. */
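Editorial note (not part of the patch): the insert above follows the same conversion: __skb_insert() now receives the owning queue instead of dereferencing skb->list. A minimal sketch, queue lock assumed held; the helper name is illustrative only.

#include <linux/skbuff.h>

static void insert_before(struct sk_buff *newsk, struct sk_buff *old,
			  struct sk_buff_head *list)
{
	/* previously: __skb_insert(newsk, old->prev, old, old->list) */
	__skb_insert(newsk, old->prev, old, list);
}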
@@ -3164,7 +3206,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
3164 } 3206 }
3165 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 3207 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3166 struct sk_buff *next = skb->next; 3208 struct sk_buff *next = skb->next;
3167 __skb_unlink(skb, skb->list); 3209 __skb_unlink(skb, list);
3168 __kfree_skb(skb); 3210 __kfree_skb(skb);
3169 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 3211 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
3170 skb = next; 3212 skb = next;
@@ -3200,7 +3242,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
3200 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 3242 if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
3201 after(TCP_SKB_CB(skb)->seq, end) || 3243 after(TCP_SKB_CB(skb)->seq, end) ||
3202 before(TCP_SKB_CB(skb)->end_seq, start)) { 3244 before(TCP_SKB_CB(skb)->end_seq, start)) {
3203 tcp_collapse(sk, head, skb, start, end); 3245 tcp_collapse(sk, &tp->out_of_order_queue,
3246 head, skb, start, end);
3204 head = skb; 3247 head = skb;
3205 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 3248 if (skb == (struct sk_buff *)&tp->out_of_order_queue)
3206 break; 3249 break;
@@ -3237,7 +3280,8 @@ static int tcp_prune_queue(struct sock *sk)
3237 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 3280 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
3238 3281
3239 tcp_collapse_ofo_queue(sk); 3282 tcp_collapse_ofo_queue(sk);
3240 tcp_collapse(sk, sk->sk_receive_queue.next, 3283 tcp_collapse(sk, &sk->sk_receive_queue,
3284 sk->sk_receive_queue.next,
3241 (struct sk_buff*)&sk->sk_receive_queue, 3285 (struct sk_buff*)&sk->sk_receive_queue,
3242 tp->copied_seq, tp->rcv_nxt); 3286 tp->copied_seq, tp->rcv_nxt);
3243 sk_stream_mem_reclaim(sk); 3287 sk_stream_mem_reclaim(sk);
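Editorial note (not part of the patch): tcp_collapse() now takes the sk_buff_head being collapsed, and the queue head cast to struct sk_buff * still serves as the end-of-list sentinel, as in the calls above. A minimal sketch of walking a queue that way; the helper name is illustrative only.

#include <linux/skbuff.h>

static unsigned int queue_len(const struct sk_buff_head *list)
{
	const struct sk_buff *skb;
	unsigned int n = 0;

	/* the head itself terminates the walk, exactly as in
	 * tcp_collapse_ofo_queue()/tcp_prune_queue() above */
	for (skb = list->next; skb != (const struct sk_buff *)list; skb = skb->next)
		n++;
	return n;
}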
@@ -3286,12 +3330,12 @@ void tcp_cwnd_application_limited(struct sock *sk)
3286{ 3330{
3287 struct tcp_sock *tp = tcp_sk(sk); 3331 struct tcp_sock *tp = tcp_sk(sk);
3288 3332
3289 if (tp->ca_state == TCP_CA_Open && 3333 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
3290 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 3334 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
3291 /* Limited by application or receiver window. */ 3335 /* Limited by application or receiver window. */
3292 u32 win_used = max(tp->snd_cwnd_used, 2U); 3336 u32 win_used = max(tp->snd_cwnd_used, 2U);
3293 if (win_used < tp->snd_cwnd) { 3337 if (win_used < tp->snd_cwnd) {
3294 tp->snd_ssthresh = tcp_current_ssthresh(tp); 3338 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3295 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 3339 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
3296 } 3340 }
3297 tp->snd_cwnd_used = 0; 3341 tp->snd_cwnd_used = 0;
@@ -3370,13 +3414,13 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
3370 struct tcp_sock *tp = tcp_sk(sk); 3414 struct tcp_sock *tp = tcp_sk(sk);
3371 3415
3372 /* More than one full frame received... */ 3416 /* More than one full frame received... */
3373 if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss 3417 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
3374 /* ... and right edge of window advances far enough. 3418 /* ... and right edge of window advances far enough.
3375 * (tcp_recvmsg() will send ACK otherwise). Or... 3419 * (tcp_recvmsg() will send ACK otherwise). Or...
3376 */ 3420 */
3377 && __tcp_select_window(sk) >= tp->rcv_wnd) || 3421 && __tcp_select_window(sk) >= tp->rcv_wnd) ||
3378 /* We ACK each frame or... */ 3422 /* We ACK each frame or... */
3379 tcp_in_quickack_mode(tp) || 3423 tcp_in_quickack_mode(sk) ||
3380 /* We have out of order data. */ 3424 /* We have out of order data. */
3381 (ofo_possible && 3425 (ofo_possible &&
3382 skb_peek(&tp->out_of_order_queue))) { 3426 skb_peek(&tp->out_of_order_queue))) {
@@ -3390,8 +3434,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
3390 3434
3391static __inline__ void tcp_ack_snd_check(struct sock *sk) 3435static __inline__ void tcp_ack_snd_check(struct sock *sk)
3392{ 3436{
3393 struct tcp_sock *tp = tcp_sk(sk); 3437 if (!inet_csk_ack_scheduled(sk)) {
3394 if (!tcp_ack_scheduled(tp)) {
3395 /* We sent a data segment already. */ 3438 /* We sent a data segment already. */
3396 return; 3439 return;
3397 } 3440 }
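Editorial note (not part of the patch): the pending-ACK test is now the generic inet_csk_ack_scheduled(). A minimal sketch of reading the delayed-ACK state through inet_connection_sock; the helper name is illustrative only, and ICSK_ACK_PUSHED is simply one of the flags TCP keeps in that word.

#include <net/inet_connection_sock.h>

static int ack_is_owed(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!inet_csk_ack_scheduled(sk))
		return 0;		/* no ACK currently scheduled */
	/* icsk_ack.pending is a flag word */
	return (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) != 0;
}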
@@ -3462,7 +3505,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3462 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 3505 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
3463 tp->copied_seq++; 3506 tp->copied_seq++;
3464 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 3507 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
3465 __skb_unlink(skb, skb->list); 3508 __skb_unlink(skb, &sk->sk_receive_queue);
3466 __kfree_skb(skb); 3509 __kfree_skb(skb);
3467 } 3510 }
3468 } 3511 }
@@ -3645,7 +3688,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3645 tp->rcv_nxt == tp->rcv_wup) 3688 tp->rcv_nxt == tp->rcv_wup)
3646 tcp_store_ts_recent(tp); 3689 tcp_store_ts_recent(tp);
3647 3690
3648 tcp_rcv_rtt_measure_ts(tp, skb); 3691 tcp_rcv_rtt_measure_ts(sk, skb);
3649 3692
3650 /* We know that such packets are checksummed 3693 /* We know that such packets are checksummed
3651 * on entry. 3694 * on entry.
@@ -3678,7 +3721,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3678 tp->rcv_nxt == tp->rcv_wup) 3721 tp->rcv_nxt == tp->rcv_wup)
3679 tcp_store_ts_recent(tp); 3722 tcp_store_ts_recent(tp);
3680 3723
3681 tcp_rcv_rtt_measure_ts(tp, skb); 3724 tcp_rcv_rtt_measure_ts(sk, skb);
3682 3725
3683 __skb_pull(skb, tcp_header_len); 3726 __skb_pull(skb, tcp_header_len);
3684 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 3727 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3699,7 +3742,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3699 tp->rcv_nxt == tp->rcv_wup) 3742 tp->rcv_nxt == tp->rcv_wup)
3700 tcp_store_ts_recent(tp); 3743 tcp_store_ts_recent(tp);
3701 3744
3702 tcp_rcv_rtt_measure_ts(tp, skb); 3745 tcp_rcv_rtt_measure_ts(sk, skb);
3703 3746
3704 if ((int)skb->truesize > sk->sk_forward_alloc) 3747 if ((int)skb->truesize > sk->sk_forward_alloc)
3705 goto step5; 3748 goto step5;
@@ -3719,7 +3762,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3719 /* Well, only one small jumplet in fast path... */ 3762 /* Well, only one small jumplet in fast path... */
3720 tcp_ack(sk, skb, FLAG_DATA); 3763 tcp_ack(sk, skb, FLAG_DATA);
3721 tcp_data_snd_check(sk, tp); 3764 tcp_data_snd_check(sk, tp);
3722 if (!tcp_ack_scheduled(tp)) 3765 if (!inet_csk_ack_scheduled(sk))
3723 goto no_ack; 3766 goto no_ack;
3724 } 3767 }
3725 3768
@@ -3741,7 +3784,7 @@ slow_path:
3741 * RFC1323: H1. Apply PAWS check first. 3784 * RFC1323: H1. Apply PAWS check first.
3742 */ 3785 */
3743 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 3786 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
3744 tcp_paws_discard(tp, skb)) { 3787 tcp_paws_discard(sk, skb)) {
3745 if (!th->rst) { 3788 if (!th->rst) {
3746 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 3789 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
3747 tcp_send_dupack(sk, skb); 3790 tcp_send_dupack(sk, skb);
@@ -3788,7 +3831,7 @@ step5:
3788 if(th->ack) 3831 if(th->ack)
3789 tcp_ack(sk, skb, FLAG_SLOWPATH); 3832 tcp_ack(sk, skb, FLAG_SLOWPATH);
3790 3833
3791 tcp_rcv_rtt_measure_ts(tp, skb); 3834 tcp_rcv_rtt_measure_ts(sk, skb);
3792 3835
3793 /* Process urgent data. */ 3836 /* Process urgent data. */
3794 tcp_urg(sk, skb, th); 3837 tcp_urg(sk, skb, th);
@@ -3817,6 +3860,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3817 tcp_parse_options(skb, &tp->rx_opt, 0); 3860 tcp_parse_options(skb, &tp->rx_opt, 0);
3818 3861
3819 if (th->ack) { 3862 if (th->ack) {
3863 struct inet_connection_sock *icsk;
3820 /* rfc793: 3864 /* rfc793:
3821 * "If the state is SYN-SENT then 3865 * "If the state is SYN-SENT then
3822 * first check the ACK bit 3866 * first check the ACK bit
@@ -3920,7 +3964,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3920 3964
3921 tcp_init_metrics(sk); 3965 tcp_init_metrics(sk);
3922 3966
3923 tcp_init_congestion_control(tp); 3967 tcp_init_congestion_control(sk);
3924 3968
3925 /* Prevent spurious tcp_cwnd_restart() on first data 3969 /* Prevent spurious tcp_cwnd_restart() on first data
3926 * packet. 3970 * packet.
@@ -3930,7 +3974,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3930 tcp_init_buffer_space(sk); 3974 tcp_init_buffer_space(sk);
3931 3975
3932 if (sock_flag(sk, SOCK_KEEPOPEN)) 3976 if (sock_flag(sk, SOCK_KEEPOPEN))
3933 tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); 3977 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
3934 3978
3935 if (!tp->rx_opt.snd_wscale) 3979 if (!tp->rx_opt.snd_wscale)
3936 __tcp_fast_path_on(tp, tp->snd_wnd); 3980 __tcp_fast_path_on(tp, tp->snd_wnd);
@@ -3942,7 +3986,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3942 sk_wake_async(sk, 0, POLL_OUT); 3986 sk_wake_async(sk, 0, POLL_OUT);
3943 } 3987 }
3944 3988
3945 if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) { 3989 icsk = inet_csk(sk);
3990
3991 if (sk->sk_write_pending ||
3992 icsk->icsk_accept_queue.rskq_defer_accept ||
3993 icsk->icsk_ack.pingpong) {
3946 /* Save one ACK. Data will be ready after 3994 /* Save one ACK. Data will be ready after
3947 * several ticks, if write_pending is set. 3995 * several ticks, if write_pending is set.
3948 * 3996 *
@@ -3950,12 +3998,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3950 * look so _wonderfully_ clever, that I was not able 3998 * look so _wonderfully_ clever, that I was not able
3951 * to stand against the temptation 8) --ANK 3999 * to stand against the temptation 8) --ANK
3952 */ 4000 */
3953 tcp_schedule_ack(tp); 4001 inet_csk_schedule_ack(sk);
3954 tp->ack.lrcvtime = tcp_time_stamp; 4002 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
3955 tp->ack.ato = TCP_ATO_MIN; 4003 icsk->icsk_ack.ato = TCP_ATO_MIN;
3956 tcp_incr_quickack(tp); 4004 tcp_incr_quickack(sk);
3957 tcp_enter_quickack_mode(tp); 4005 tcp_enter_quickack_mode(sk);
3958 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); 4006 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
4007 TCP_DELACK_MAX, TCP_RTO_MAX);
3959 4008
3960discard: 4009discard:
3961 __kfree_skb(skb); 4010 __kfree_skb(skb);
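Editorial note (not part of the patch): the delayed-ACK timer is armed above through the generic connection-sock helper, which now carries an explicit upper clamp. A minimal sketch of the call; the helper name is illustrative only.

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

static void arm_delayed_ack(struct sock *sk)
{
	/* previously: tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
	 * the extra argument bounds the timeout at TCP_RTO_MAX */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
				  TCP_DELACK_MAX, TCP_RTO_MAX);
}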
@@ -4111,7 +4160,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4111 } 4160 }
4112 4161
4113 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4162 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4114 tcp_paws_discard(tp, skb)) { 4163 tcp_paws_discard(sk, skb)) {
4115 if (!th->rst) { 4164 if (!th->rst) {
4116 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 4165 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
4117 tcp_send_dupack(sk, skb); 4166 tcp_send_dupack(sk, skb);
@@ -4180,7 +4229,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4180 */ 4229 */
4181 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 4230 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
4182 !tp->srtt) 4231 !tp->srtt)
4183 tcp_ack_saw_tstamp(tp, 0, 0); 4232 tcp_ack_saw_tstamp(sk, NULL, 0);
4184 4233
4185 if (tp->rx_opt.tstamp_ok) 4234 if (tp->rx_opt.tstamp_ok)
4186 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 4235 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -4192,7 +4241,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4192 4241
4193 tcp_init_metrics(sk); 4242 tcp_init_metrics(sk);
4194 4243
4195 tcp_init_congestion_control(tp); 4244 tcp_init_congestion_control(sk);
4196 4245
4197 /* Prevent spurious tcp_cwnd_restart() on 4246 /* Prevent spurious tcp_cwnd_restart() on
4198 * first data packet. 4247 * first data packet.
@@ -4227,9 +4276,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4227 return 1; 4276 return 1;
4228 } 4277 }
4229 4278
4230 tmo = tcp_fin_time(tp); 4279 tmo = tcp_fin_time(sk);
4231 if (tmo > TCP_TIMEWAIT_LEN) { 4280 if (tmo > TCP_TIMEWAIT_LEN) {
4232 tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 4281 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
4233 } else if (th->fin || sock_owned_by_user(sk)) { 4282 } else if (th->fin || sock_owned_by_user(sk)) {
4234 /* Bad case. We could lose such FIN otherwise. 4283 /* Bad case. We could lose such FIN otherwise.
4235 * It is not a big problem, but it looks confusing 4284 * It is not a big problem, but it looks confusing
@@ -4237,7 +4286,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4237 * if it spins in bh_lock_sock(), but it is really 4286 * if it spins in bh_lock_sock(), but it is really
4238 * marginal case. 4287 * marginal case.
4239 */ 4288 */
4240 tcp_reset_keepalive_timer(sk, tmo); 4289 inet_csk_reset_keepalive_timer(sk, tmo);
4241 } else { 4290 } else {
4242 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 4291 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
4243 goto discard; 4292 goto discard;
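Editorial note (not part of the patch): the FIN_WAIT2 handling above reuses the keepalive timer, which is now reset through the renamed generic helper. A minimal sketch showing only the timer-reset branches of the state machine above (the tcp_time_wait() branch is omitted); the helper name is illustrative only.

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

static void fin_wait2_wait(struct sock *sk, unsigned long tmo)
{
	/* previously: tcp_reset_keepalive_timer(sk, ...) */
	if (tmo > TCP_TIMEWAIT_LEN)
		inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
	else
		inet_csk_reset_keepalive_timer(sk, tmo);
}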