Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 266 +++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 138 insertions(+), 128 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ffa24025cd02..8a8c5c2d90cb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -114,20 +114,21 @@ int sysctl_tcp_moderate_rcvbuf = 1;
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
-static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
-				       struct sk_buff *skb)
+static inline void tcp_measure_rcv_mss(struct sock *sk,
+				       const struct sk_buff *skb)
 {
-	unsigned int len, lss;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	const unsigned int lss = icsk->icsk_ack.last_seg_size;
+	unsigned int len;
 
-	lss = tp->ack.last_seg_size;
-	tp->ack.last_seg_size = 0;
+	icsk->icsk_ack.last_seg_size = 0;
 
 	/* skb->len may jitter because of SACKs, even if peer
 	 * sends good full-sized frames.
 	 */
 	len = skb->len;
-	if (len >= tp->ack.rcv_mss) {
-		tp->ack.rcv_mss = len;
+	if (len >= icsk->icsk_ack.rcv_mss) {
+		icsk->icsk_ack.rcv_mss = len;
 	} else {
 		/* Otherwise, we make more careful check taking into account,
 		 * that SACKs block is variable.
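The hunk above sets the pattern for the whole patch: delayed-ACK state moves out of struct tcp_sock (tp->ack) into struct inet_connection_sock (icsk->icsk_ack), and helpers now take the struct sock and recover the connection state via inet_csk(). That accessor is just a pointer cast, legal because struct inet_connection_sock begins with struct inet_sock, which begins with struct sock. A minimal userspace sketch of the idea; the structs below are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

struct sock_stub {                     /* stands in for struct sock */
	int dummy;
};

struct icsk_ack_stub {                 /* stands in for icsk->icsk_ack */
	unsigned int rcv_mss;
	unsigned int last_seg_size;
};

struct inet_csk_stub {                 /* stands in for inet_connection_sock */
	struct sock_stub sk;           /* must be the first member */
	struct icsk_ack_stub icsk_ack;
};

/* The inet_csk() idiom: a downcast, valid because the connection
 * sock begins with struct sock. */
static struct inet_csk_stub *inet_csk_stub(struct sock_stub *sk)
{
	return (struct inet_csk_stub *)sk;
}

int main(void)
{
	struct inet_csk_stub conn = { .icsk_ack = { .rcv_mss = 536 } };
	struct sock_stub *sk = &conn.sk;

	printf("rcv_mss via sk: %u\n", inet_csk_stub(sk)->icsk_ack.rcv_mss);
	return 0;
}

tcp_sk() works the same way, which is why mixed accesses such as tcp_sk(sk)->tcp_header_len next to icsk->icsk_ack cost nothing extra.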
@@ -147,41 +148,44 @@ static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
 			 * tcp header plus fixed timestamp option length.
 			 * Resulting "len" is MSS free of SACK jitter.
 			 */
-			len -= tp->tcp_header_len;
-			tp->ack.last_seg_size = len;
+			len -= tcp_sk(sk)->tcp_header_len;
+			icsk->icsk_ack.last_seg_size = len;
 			if (len == lss) {
-				tp->ack.rcv_mss = len;
+				icsk->icsk_ack.rcv_mss = len;
 				return;
 			}
 		}
-		tp->ack.pending |= TCP_ACK_PUSHED;
+		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 	}
 }
 
-static void tcp_incr_quickack(struct tcp_sock *tp)
+static void tcp_incr_quickack(struct sock *sk)
 {
-	unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks==0)
 		quickacks=2;
-	if (quickacks > tp->ack.quick)
-		tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+	if (quickacks > icsk->icsk_ack.quick)
+		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
 
-void tcp_enter_quickack_mode(struct tcp_sock *tp)
+void tcp_enter_quickack_mode(struct sock *sk)
 {
-	tcp_incr_quickack(tp);
-	tp->ack.pingpong = 0;
-	tp->ack.ato = TCP_ATO_MIN;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	tcp_incr_quickack(sk);
+	icsk->icsk_ack.pingpong = 0;
+	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
  */
 
-static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp)
+static inline int tcp_in_quickack_mode(const struct sock *sk)
 {
-	return (tp->ack.quick && !tp->ack.pingpong);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
 /* Buffer size and advertised window tuning.
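The budget tcp_incr_quickack() computes can be exercised standalone: grant enough immediate ACKs to cover half the receive window, at least 2, at most TCP_MAX_QUICKACKS. A sketch, assuming this era's TCP_MAX_QUICKACKS of 16 (check include/net/tcp.h):

#include <stdio.h>

#define TCP_MAX_QUICKACKS 16U   /* assumed value, see lead-in */

static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > TCP_MAX_QUICKACKS)
		quickacks = TCP_MAX_QUICKACKS;
	return quickacks;
}

int main(void)
{
	printf("%u\n", quickack_budget(65535, 1460)); /* 22, capped to 16 */
	printf("%u\n", quickack_budget(8192, 1460));  /* 2 */
	return 0;
}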
@@ -224,8 +228,8 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
-			     struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
+			     const struct sk_buff *skb)
 {
 	/* Optimize this! */
 	int truesize = tcp_win_from_space(skb->truesize)/2;
@@ -233,7 +237,7 @@ static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
 
 	while (tp->rcv_ssthresh <= window) {
 		if (truesize <= skb->len)
-			return 2*tp->ack.rcv_mss;
+			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
 
 		truesize >>= 1;
 		window >>= 1;
@@ -260,7 +264,7 @@ static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
 
 		if (incr) {
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
-			tp->ack.quick |= 1;
+			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
 	}
 }
@@ -325,7 +329,7 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 	unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
 	int ofo_win = 0;
 
-	tp->ack.quick = 0;
+	inet_csk(sk)->icsk_ack.quick = 0;
 
 	skb_queue_walk(&tp->out_of_order_queue, skb) {
 		ofo_win += skb->len;
@@ -346,8 +350,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 		app_win += ofo_win;
 		if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
 			app_win >>= 1;
-		if (app_win > tp->ack.rcv_mss)
-			app_win -= tp->ack.rcv_mss;
+		if (app_win > inet_csk(sk)->icsk_ack.rcv_mss)
+			app_win -= inet_csk(sk)->icsk_ack.rcv_mss;
 		app_win = max(app_win, 2U*tp->advmss);
 
 		if (!ofo_win)
@@ -415,11 +419,12 @@ new_measure:
 	tp->rcv_rtt_est.time = tcp_time_stamp;
 }
 
-static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	if (tp->rx_opt.rcv_tsecr &&
 	    (TCP_SKB_CB(skb)->end_seq -
-	     TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss))
+	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
 		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
 }
 
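tcp_rcv_rtt_measure_ts() gates its samples so that only segments carrying at least a full rcv_mss of payload feed the receive-side RTT estimate used for receive-buffer moderation; pure ACKs and sub-MSS segments would bias it. A standalone sketch of that gate, with a hypothetical helper and invented values:

#include <stdio.h>

/* Hypothetical helper mirroring the gate above. */
static int rcv_rtt_sample(unsigned int now, unsigned int tsecr,
			  unsigned int seg_len, unsigned int rcv_mss,
			  unsigned int *sample)
{
	if (tsecr == 0 || seg_len < rcv_mss)
		return 0;               /* no usable sample */
	*sample = now - tsecr;          /* u32 subtraction is wrap-safe */
	return 1;
}

int main(void)
{
	unsigned int rtt;

	if (rcv_rtt_sample(1000, 940, 1460, 1460, &rtt))
		printf("rtt sample: %u ticks\n", rtt);  /* 60 */
	if (!rcv_rtt_sample(1000, 940, 0, 1460, &rtt))
		printf("pure ACK: no sample\n");
	return 0;
}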
@@ -492,41 +497,42 @@ new_measure:
  */
 static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now;
 
-	tcp_schedule_ack(tp);
+	inet_csk_schedule_ack(sk);
 
-	tcp_measure_rcv_mss(tp, skb);
+	tcp_measure_rcv_mss(sk, skb);
 
 	tcp_rcv_rtt_measure(tp);
 
 	now = tcp_time_stamp;
 
-	if (!tp->ack.ato) {
+	if (!icsk->icsk_ack.ato) {
 		/* The _first_ data packet received, initialize
 		 * delayed ACK engine.
 		 */
-		tcp_incr_quickack(tp);
-		tp->ack.ato = TCP_ATO_MIN;
+		tcp_incr_quickack(sk);
+		icsk->icsk_ack.ato = TCP_ATO_MIN;
 	} else {
-		int m = now - tp->ack.lrcvtime;
+		int m = now - icsk->icsk_ack.lrcvtime;
 
 		if (m <= TCP_ATO_MIN/2) {
 			/* The fastest case is the first. */
-			tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2;
-		} else if (m < tp->ack.ato) {
-			tp->ack.ato = (tp->ack.ato>>1) + m;
-			if (tp->ack.ato > tp->rto)
-				tp->ack.ato = tp->rto;
-		} else if (m > tp->rto) {
+			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
+		} else if (m < icsk->icsk_ack.ato) {
+			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
+			if (icsk->icsk_ack.ato > icsk->icsk_rto)
+				icsk->icsk_ack.ato = icsk->icsk_rto;
+		} else if (m > icsk->icsk_rto) {
 			/* Too long gap. Apparently sender falled to
 			 * restart window, so that we send ACKs quickly.
 			 */
-			tcp_incr_quickack(tp);
+			tcp_incr_quickack(sk);
 			sk_stream_mem_reclaim(sk);
 		}
 	}
-	tp->ack.lrcvtime = now;
+	icsk->icsk_ack.lrcvtime = now;
 
 	TCP_ECN_check_ce(tp, skb);
 
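The ato arithmetic above is an EWMA of inter-arrival gaps: back-to-back arrivals pin it near TCP_ATO_MIN, moderate gaps pull it toward the observed spacing (clamped to the RTO), and a gap beyond the RTO re-enters quick-ACK mode instead. A standalone sketch in jiffies, assuming HZ=1000 so that TCP_ATO_MIN is 40:

#include <stdio.h>

#define TCP_ATO_MIN 40U   /* HZ/25 at HZ=1000, assumed here */

static unsigned int ato_update(unsigned int ato, unsigned int rto, unsigned int m)
{
	if (m <= TCP_ATO_MIN / 2)
		ato = (ato >> 1) + TCP_ATO_MIN / 2;   /* fast arrivals */
	else if (m < ato) {
		ato = (ato >> 1) + m;                 /* drift toward gap */
		if (ato > rto)
			ato = rto;
	}
	/* m > rto re-enters quick-ACK mode instead (see the hunk). */
	return ato;
}

int main(void)
{
	printf("%u\n", ato_update(40, 200, 10)); /* stays 40 */
	printf("%u\n", ato_update(40, 200, 35)); /* becomes 55 */
	return 0;
}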
@@ -611,8 +617,9 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct tcp_sock *tp)
+static inline void tcp_set_rto(struct sock *sk)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Old crap is replaced with new one. 8)
 	 *
 	 * More seriously:
@@ -623,7 +630,7 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
 	 * is invisible. Actually, Linux-2.4 also generates erratic
 	 * ACKs in some curcumstances.
 	 */
-	tp->rto = (tp->srtt >> 3) + tp->rttvar;
+	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
 
 	/* 2. Fixups made earlier cannot be right.
 	 *    If we do not estimate RTO correctly without them,
@@ -635,10 +642,10 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
  * guarantees that rto is higher.
  */
-static inline void tcp_bound_rto(struct tcp_sock *tp)
+static inline void tcp_bound_rto(struct sock *sk)
 {
-	if (tp->rto > TCP_RTO_MAX)
-		tp->rto = TCP_RTO_MAX;
+	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
+		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 }
 
 /* Save metrics learned by this TCP session.
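tcp_set_rto() and tcp_bound_rto() together implement the Van Jacobson rule rto = srtt/8 + rttvar; srtt is stored left-shifted by 3, so the >> 3 recovers the smoothed mean. A standalone sketch, taking TCP_RTO_MAX as 120000 ms (120 s at HZ=1000, an assumption here):

#include <stdio.h>

#define TCP_RTO_MAX 120000U   /* assumed: 120 s in ms at HZ=1000 */

static unsigned int rto_from_estimators(unsigned int srtt_shifted3,
					unsigned int rttvar)
{
	unsigned int rto = (srtt_shifted3 >> 3) + rttvar;  /* tcp_set_rto() */

	if (rto > TCP_RTO_MAX)                             /* tcp_bound_rto() */
		rto = TCP_RTO_MAX;
	return rto;
}

int main(void)
{
	/* 100 ms smoothed RTT (stored as 800) + 50 ms variance -> 150 ms */
	printf("%u\n", rto_from_estimators(800, 50));
	return 0;
}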
@@ -658,7 +665,7 @@ void tcp_update_metrics(struct sock *sk)
 	if (dst && (dst->flags&DST_HOST)) {
 		int m;
 
-		if (tp->backoff || !tp->srtt) {
+		if (inet_csk(sk)->icsk_backoff || !tp->srtt) {
 			/* This session failed to estimate rtt. Why?
 			 * Probably, no packets returned in time.
 			 * Reset our results.
@@ -801,9 +808,9 @@ static void tcp_init_metrics(struct sock *sk)
 		tp->mdev = dst_metric(dst, RTAX_RTTVAR);
 		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
 	}
-	tcp_set_rto(tp);
-	tcp_bound_rto(tp);
-	if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
+	tcp_set_rto(sk);
+	tcp_bound_rto(sk);
+	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
 		goto reset;
 	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -817,7 +824,7 @@ reset:
 	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
 		tp->srtt = 0;
 		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-		tp->rto = TCP_TIMEOUT_INIT;
+		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
 	}
 }
 
@@ -1118,7 +1125,7 @@ void tcp_enter_frto(struct sock *sk)
 
 	if (tp->ca_state <= TCP_CA_Disorder ||
 	    tp->snd_una == tp->high_seq ||
-	    (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
+	    (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(tp);
 		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
 		tcp_ca_event(tp, CA_EVENT_FRTO);
@@ -1214,7 +1221,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
 	if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
-	    (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
+	    (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(tp);
 		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
 		tcp_ca_event(tp, CA_EVENT_LOSS);
@@ -1253,7 +1260,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 	TCP_ECN_queue_cwr(tp);
 }
 
-static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp)
+static int tcp_check_sack_reneging(struct sock *sk)
 {
 	struct sk_buff *skb;
 
@@ -1268,9 +1275,10 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp)
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
-		tp->retransmits++;
+		inet_csk(sk)->icsk_retransmits++;
 		tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+					  inet_csk(sk)->icsk_rto);
 		return 1;
 	}
 	return 0;
@@ -1281,15 +1289,15 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
 	return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
 }
 
-static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb)
+static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 {
-	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto);
+	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
 {
 	return tp->packets_out &&
-	       tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue));
+	       tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
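tcp_skb_timedout() is a wrap-safe jiffies comparison: a queued segment has timed out once it has waited unacknowledged longer than the current RTO. A standalone sketch with invented values:

#include <stdio.h>

typedef unsigned int u32;

/* Mirrors the predicate above; u32 subtraction handles jiffies wrap. */
static int skb_timedout(u32 now, u32 sent_when, u32 rto)
{
	return now - sent_when > rto;
}

int main(void)
{
	printf("%d\n", skb_timedout(1300, 1000, 200)); /* 300 > 200 -> 1 */
	printf("%d\n", skb_timedout(1100, 1000, 200)); /* 100 <= 200 -> 0 */
	return 0;
}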
@@ -1509,7 +1517,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 		struct sk_buff *skb;
 
 		sk_stream_for_retrans_queue(skb, sk) {
-			if (tcp_skb_timedout(tp, skb) &&
+			if (tcp_skb_timedout(sk, skb) &&
 			    !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
@@ -1676,7 +1684,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 		tp->left_out = tp->sacked_out;
 		tcp_undo_cwr(tp, 1);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
-		tp->retransmits = 0;
+		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (!IsReno(tp))
 			tcp_set_ca_state(tp, TCP_CA_Open);
@@ -1750,7 +1758,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tp->prior_ssthresh = 0;
 
 	/* B. In all the states check for reneging SACKs. */
-	if (tp->sacked_out && tcp_check_sack_reneging(sk, tp))
+	if (tp->sacked_out && tcp_check_sack_reneging(sk))
 		return;
 
 	/* C. Process data loss notification, provided it is valid. */
@@ -1774,7 +1782,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	} else if (!before(tp->snd_una, tp->high_seq)) {
 		switch (tp->ca_state) {
 		case TCP_CA_Loss:
-			tp->retransmits = 0;
+			inet_csk(sk)->icsk_retransmits = 0;
 			if (tcp_try_undo_recovery(sk, tp))
 				return;
 			break;
@@ -1824,7 +1832,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			break;
 		case TCP_CA_Loss:
 			if (flag&FLAG_DATA_ACKED)
-				tp->retransmits = 0;
+				inet_csk(sk)->icsk_retransmits = 0;
 			if (!tcp_try_undo_loss(sk, tp)) {
 				tcp_moderate_cwnd(tp);
 				tcp_xmit_retransmit_queue(sk);
@@ -1881,10 +1889,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Superceeds RFC1323)
  */
-static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag)
+static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
 {
-	__u32 seq_rtt;
-
 	/* RTTM Rule: A TSecr value received in a segment is used to
 	 * update the averaged RTT measurement only if the segment
 	 * acknowledges some new data, i.e., only if it advances the
@@ -1900,14 +1906,15 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag)
 	 * answer arrives rto becomes 120 seconds! If at least one of segments
 	 * in window is lost... Voila.			--ANK (010210)
 	 */
-	seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+	struct tcp_sock *tp = tcp_sk(sk);
+	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
 	tcp_rtt_estimator(tp, seq_rtt, usrtt);
-	tcp_set_rto(tp);
-	tp->backoff = 0;
-	tcp_bound_rto(tp);
+	tcp_set_rto(sk);
+	inet_csk(sk)->icsk_backoff = 0;
+	tcp_bound_rto(sk);
 }
 
-static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag)
+static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
 {
 	/* We don't have a timestamp. Can only use
 	 * packets that are not retransmitted to determine
@@ -1921,20 +1928,21 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int
 	if (flag & FLAG_RETRANS_DATA_ACKED)
 		return;
 
-	tcp_rtt_estimator(tp, seq_rtt, usrtt);
-	tcp_set_rto(tp);
-	tp->backoff = 0;
-	tcp_bound_rto(tp);
+	tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt);
+	tcp_set_rto(sk);
+	inet_csk(sk)->icsk_backoff = 0;
+	tcp_bound_rto(sk);
 }
 
-static inline void tcp_ack_update_rtt(struct tcp_sock *tp,
-				      int flag, s32 seq_rtt, u32 *usrtt)
+static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
+				      const s32 seq_rtt, u32 *usrtt)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
 	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
-		tcp_ack_saw_tstamp(tp, usrtt, flag);
+		tcp_ack_saw_tstamp(sk, usrtt, flag);
 	else if (seq_rtt >= 0)
-		tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag);
+		tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
 }
 
 static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
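tcp_ack_update_rtt() picks the RTT source: prefer an echoed timestamp; otherwise accept the measured seq_rtt, except for retransmitted data (Karn's rule, enforced by the FLAG_RETRANS_DATA_ACKED test above). A sketch of that dispatch as a standalone decision function:

#include <stdio.h>

enum rtt_source { RTT_NONE, RTT_TSECR, RTT_SEQ };

static enum rtt_source pick_rtt_source(int saw_tstamp, unsigned int rcv_tsecr,
				       int seq_rtt, int retrans_acked)
{
	if (saw_tstamp && rcv_tsecr)
		return RTT_TSECR;                 /* tcp_ack_saw_tstamp() path */
	if (seq_rtt >= 0 && !retrans_acked)
		return RTT_SEQ;                   /* tcp_ack_no_tstamp() path */
	return RTT_NONE;                          /* Karn: no sample */
}

int main(void)
{
	printf("%d\n", pick_rtt_source(1, 4242, -1, 0)); /* 1: timestamp */
	printf("%d\n", pick_rtt_source(0, 0, 35, 1));    /* 0: Karn skips it */
	return 0;
}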
@@ -1951,9 +1959,9 @@ static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
 static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
 {
 	if (!tp->packets_out) {
-		tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS);
+		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	} else {
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 	}
 }
 
@@ -2090,7 +2098,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 	}
 
 	if (acked&FLAG_ACKED) {
-		tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt);
+		tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
 		tcp_ack_packets_out(sk, tp);
 
 		if (tp->ca_ops->pkts_acked)
@@ -2125,20 +2133,21 @@
 
 static void tcp_ack_probe(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	/* Was it a usable window open? */
 
 	if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
 		   tp->snd_una + tp->snd_wnd)) {
-		tp->backoff = 0;
-		tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
+		icsk->icsk_backoff = 0;
+		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
 		/* Socket must be waked up by subsequent tcp_data_snd_check().
 		 * This function is not for random using!
 		 */
 	} else {
-		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
-				     min(tp->rto << tp->backoff, TCP_RTO_MAX));
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
 	}
 }
 
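The probe0 rearm above backs off exponentially, icsk_rto << icsk_backoff, capped at TCP_RTO_MAX. A standalone sketch in ms, with TCP_RTO_MAX assumed to be 120000:

#include <stdio.h>

#define TCP_RTO_MAX 120000U   /* assumed: 120 s in ms */

static unsigned int probe0_timeout(unsigned int rto, unsigned int backoff)
{
	unsigned int when = rto << backoff;   /* doubles per unanswered probe */

	return when < TCP_RTO_MAX ? when : TCP_RTO_MAX;
}

int main(void)
{
	printf("%u\n", probe0_timeout(200, 0));  /* 200 */
	printf("%u\n", probe0_timeout(200, 5));  /* 6400 */
	printf("%u\n", probe0_timeout(200, 12)); /* capped: 120000 */
	return 0;
}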
@@ -2157,8 +2166,8 @@ static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag)
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
-static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack,
-					u32 ack_seq, u32 nwin)
+static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
+					const u32 ack_seq, const u32 nwin)
 {
 	return (after(ack, tp->snd_una) ||
 		after(ack_seq, tp->snd_wl1) ||
@@ -2500,8 +2509,9 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
  * up to bandwidth of 18Gigabit/sec. 8) ]
  */
 
-static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcphdr *th = skb->h.th;
 	u32 seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -2516,14 +2526,15 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
 		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
 
 		/* 4. ... and sits in replay window. */
-		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ);
+		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 }
 
-static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb)
+static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
 		xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-		!tcp_disordered_ack(tp, skb));
+		!tcp_disordered_ack(sk, skb));
 }
 
 /* Check segment sequence number for validity.
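tcp_paws_discard() rejects a segment whose timestamp lags ts_recent by more than TCP_PAWS_WINDOW (one tick) while ts_recent is fresher than 24 days, unless tcp_disordered_ack() excuses it as a delayed, window-less pure ACK. A standalone sketch of the predicate, with the disordered-ACK result reduced to a flag:

#include <stdio.h>

#define TCP_PAWS_WINDOW 1
#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)

static int paws_discard(unsigned int ts_recent, unsigned int rcv_tsval,
			long now_sec, long ts_recent_stamp, int disordered_ack)
{
	return (int)(ts_recent - rcv_tsval) > TCP_PAWS_WINDOW &&  /* s32 compare */
	       now_sec < ts_recent_stamp + TCP_PAWS_24DAYS &&
	       !disordered_ack;
}

int main(void)
{
	/* stale tsval (older by 10), fresh ts_recent, not excused -> discard */
	printf("%d\n", paws_discard(1000, 990, 100, 50, 0));
	return 0;
}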
@@ -2586,7 +2597,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	tcp_schedule_ack(tp);
+	inet_csk_schedule_ack(sk);
 
 	sk->sk_shutdown |= RCV_SHUTDOWN;
 	sock_set_flag(sk, SOCK_DONE);
@@ -2596,7 +2607,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 	case TCP_ESTABLISHED:
 		/* Move to CLOSE_WAIT */
 		tcp_set_state(sk, TCP_CLOSE_WAIT);
-		tp->ack.pingpong = 1;
+		inet_csk(sk)->icsk_ack.pingpong = 1;
 		break;
 
 	case TCP_CLOSE_WAIT:
@@ -2694,7 +2705,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
-		tcp_enter_quickack_mode(tp);
+		tcp_enter_quickack_mode(sk);
 
 		if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -2942,7 +2953,7 @@ queue_and_out:
 		 * gap in queue is filled.
 		 */
 		if (skb_queue_empty(&tp->out_of_order_queue))
-			tp->ack.pingpong = 0;
+			inet_csk(sk)->icsk_ack.pingpong = 0;
 	}
 
 	if (tp->rx_opt.num_sacks)
@@ -2963,8 +2974,8 @@ queue_and_out:
 		tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
-		tcp_enter_quickack_mode(tp);
-		tcp_schedule_ack(tp);
+		tcp_enter_quickack_mode(sk);
+		inet_csk_schedule_ack(sk);
 drop:
 		__kfree_skb(skb);
 		return;
@@ -2974,7 +2985,7 @@ drop:
 	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
 		goto out_of_window;
 
-	tcp_enter_quickack_mode(tp);
+	tcp_enter_quickack_mode(sk);
 
 	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		/* Partial packet, seq < rcv_next < end_seq */
@@ -3003,7 +3014,7 @@ drop:
 
 	/* Disable header prediction. */
 	tp->pred_flags = 0;
-	tcp_schedule_ack(tp);
+	inet_csk_schedule_ack(sk);
 
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -3373,13 +3384,13 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	    /* More than one full frame received... */
-	if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss
+	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
 	     /* ... and right edge of window advances far enough.
 	      * (tcp_recvmsg() will send ACK otherwise). Or...
 	      */
 	     && __tcp_select_window(sk) >= tp->rcv_wnd) ||
 	    /* We ACK each frame or... */
-	    tcp_in_quickack_mode(tp) ||
+	    tcp_in_quickack_mode(sk) ||
 	    /* We have out of order data. */
 	    (ofo_possible &&
 	     skb_peek(&tp->out_of_order_queue))) {
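The decision above reduces to: ACK now if more than one full frame is unacknowledged and the advertised window can advance, or quick-ACK mode is on, or out-of-order data is queued; otherwise leave it to the delayed-ACK timer. A simplified standalone sketch, with the window test reduced to a boolean stand-in for __tcp_select_window():

#include <stdio.h>

static int should_ack_now(unsigned int unacked_bytes, unsigned int rcv_mss,
			  int window_advances, int in_quickack,
			  int have_ofo_data)
{
	return (unacked_bytes > rcv_mss && window_advances) ||
	       in_quickack || have_ofo_data;
}

int main(void)
{
	printf("%d\n", should_ack_now(3000, 1460, 1, 0, 0)); /* 1: ACK now */
	printf("%d\n", should_ack_now(500, 1460, 1, 0, 0));  /* 0: delay */
	return 0;
}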
@@ -3393,8 +3404,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
 
 static __inline__ void tcp_ack_snd_check(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	if (!tcp_ack_scheduled(tp)) {
+	if (!inet_csk_ack_scheduled(sk)) {
 		/* We sent a data segment already. */
 		return;
 	}
@@ -3648,7 +3658,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			    tp->rcv_nxt == tp->rcv_wup)
 				tcp_store_ts_recent(tp);
 
-			tcp_rcv_rtt_measure_ts(tp, skb);
+			tcp_rcv_rtt_measure_ts(sk, skb);
 
 			/* We know that such packets are checksummed
 			 * on entry.
@@ -3681,7 +3691,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				    tp->rcv_nxt == tp->rcv_wup)
 					tcp_store_ts_recent(tp);
 
-				tcp_rcv_rtt_measure_ts(tp, skb);
+				tcp_rcv_rtt_measure_ts(sk, skb);
 
 				__skb_pull(skb, tcp_header_len);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3702,7 +3712,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			    tp->rcv_nxt == tp->rcv_wup)
 				tcp_store_ts_recent(tp);
 
-			tcp_rcv_rtt_measure_ts(tp, skb);
+			tcp_rcv_rtt_measure_ts(sk, skb);
 
 			if ((int)skb->truesize > sk->sk_forward_alloc)
 				goto step5;
@@ -3722,7 +3732,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				/* Well, only one small jumplet in fast path... */
 				tcp_ack(sk, skb, FLAG_DATA);
 				tcp_data_snd_check(sk, tp);
-				if (!tcp_ack_scheduled(tp))
+				if (!inet_csk_ack_scheduled(sk))
 					goto no_ack;
 			}
 
@@ -3744,7 +3754,7 @@ slow_path:
 	 * RFC1323: H1. Apply PAWS check first.
 	 */
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-	    tcp_paws_discard(tp, skb)) {
+	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
 			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
@@ -3791,7 +3801,7 @@ step5:
 	if(th->ack)
 		tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-	tcp_rcv_rtt_measure_ts(tp, skb);
+	tcp_rcv_rtt_measure_ts(sk, skb);
 
 	/* Process urgent data. */
 	tcp_urg(sk, skb, th);
@@ -3933,7 +3943,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		tcp_init_buffer_space(sk);
 
 		if (sock_flag(sk, SOCK_KEEPOPEN))
-			tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));
+			inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
 
 		if (!tp->rx_opt.snd_wscale)
 			__tcp_fast_path_on(tp, tp->snd_wnd);
@@ -3945,7 +3955,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 			sk_wake_async(sk, 0, POLL_OUT);
 		}
 
-		if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) {
+		if (sk->sk_write_pending || tp->defer_accept || inet_csk(sk)->icsk_ack.pingpong) {
 			/* Save one ACK. Data will be ready after
 			 * several ticks, if write_pending is set.
 			 *
@@ -3953,12 +3963,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 			 * look so _wonderfully_ clever, that I was not able
 			 * to stand against the temptation 8)     --ANK
 			 */
-			tcp_schedule_ack(tp);
-			tp->ack.lrcvtime = tcp_time_stamp;
-			tp->ack.ato	 = TCP_ATO_MIN;
-			tcp_incr_quickack(tp);
-			tcp_enter_quickack_mode(tp);
-			tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
+			inet_csk_schedule_ack(sk);
+			inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp;
+			inet_csk(sk)->icsk_ack.ato	 = TCP_ATO_MIN;
+			tcp_incr_quickack(sk);
+			tcp_enter_quickack_mode(sk);
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
 
 discard:
 			__kfree_skb(skb);
@@ -4114,7 +4124,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	}
 
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-	    tcp_paws_discard(tp, skb)) {
+	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
 			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
@@ -4183,7 +4193,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 */
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !tp->srtt)
-			tcp_ack_saw_tstamp(tp, 0, 0);
+			tcp_ack_saw_tstamp(sk, 0, 0);
 
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -4230,9 +4240,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 				return 1;
 			}
 
-			tmo = tcp_fin_time(tp);
+			tmo = tcp_fin_time(sk);
 			if (tmo > TCP_TIMEWAIT_LEN) {
-				tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+				inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
 			} else if (th->fin || sock_owned_by_user(sk)) {
 				/* Bad case. We could lose such FIN otherwise.
 				 * It is not a big problem, but it looks confusing
@@ -4240,7 +4250,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 				 * if it spins in bh_lock_sock(), but it is really
 				 * marginal case.
 				 */
-				tcp_reset_keepalive_timer(sk, tmo);
+				inet_csk_reset_keepalive_timer(sk, tmo);
 			} else {
 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 				goto discard;
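The FIN_WAIT2 handling in the last two hunks splits three ways on the computed tmo. A sketch of just that disposition, with TCP_TIMEWAIT_LEN assumed to be 60000 ms (60*HZ at HZ=1000) and the actions reduced to strings:

#include <stdio.h>

#define TCP_TIMEWAIT_LEN 60000U   /* assumed: 60 s in ms */

static const char *fin_wait2_action(unsigned int tmo, int fin_or_locked)
{
	if (tmo > TCP_TIMEWAIT_LEN)
		return "arm keepalive timer for tmo - TCP_TIMEWAIT_LEN";
	if (fin_or_locked)
		return "arm keepalive timer for tmo (don't lose the FIN)";
	return "enter TIME_WAIT substate now";
}

int main(void)
{
	printf("%s\n", fin_wait2_action(120000, 0));
	printf("%s\n", fin_wait2_action(30000, 1));
	return 0;
}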