author		Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-08-10 03:03:31 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:56:18 -0400
commit		6687e988d9aeaccad6774e6a8304f681f3ec0a03 (patch)
tree		ecd3d28f9989847aa1dcde4782de0210aeadc290 /net/ipv4/tcp_input.c
parent		64ce207306debd7157f47282be94770407bec01c (diff)
[ICSK]: Move TCP congestion avoidance members to icsk
This changeset basically moves tcp_sk()->{ca_ops,ca_state,etc} to inet_csk(), minimal renaming/moving done in this changeset to ease review.

Most of it is just changes of struct tcp_sock * to struct sock * parameters.

With this we move to a state closer to two interesting goals:

1. Generalisation of net/ipv4/tcp_diag.c, becoming inet_diag.c, being used for any INET transport protocol that has struct inet_hashinfo and are derived from struct inet_connection_sock. Keeps the userspace API, that will just not display DCCP sockets, while newer versions of tools can support DCCP.

2. INET generic transport pluggable Congestion Avoidance infrastructure, using the current TCP CA infrastructure with DCCP.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
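The mechanical pattern repeated throughout the diff below: helpers that used to take a struct tcp_sock * now take a struct sock *, and the congestion-avoidance state is reached through inet_csk() instead of living in tcp_sk(). A minimal before/after sketch of that access pattern, using only identifiers that appear in the diff (illustrative, not a complete compilable excerpt):

	/* Before: CA state and ops lived in struct tcp_sock. */
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->ca_state == TCP_CA_Open)
		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);

	/* After: CA state and ops live in struct inet_connection_sock,
	 * and the CA hooks take the generic struct sock *.
	 */
	const struct inet_connection_sock *icsk = inet_csk(sk);
	if (icsk->icsk_ca_state == TCP_CA_Open)
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);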
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	223
1 file changed, 124 insertions(+), 99 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 71d456148de7..fdd9547fb783 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -325,11 +325,12 @@ static void tcp_init_buffer_space(struct sock *sk)
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
 static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
 	int ofo_win = 0;
 
-	inet_csk(sk)->icsk_ack.quick = 0;
+	icsk->icsk_ack.quick = 0;
 
 	skb_queue_walk(&tp->out_of_order_queue, skb) {
 		ofo_win += skb->len;
@@ -350,8 +351,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 	app_win += ofo_win;
 	if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
 		app_win >>= 1;
-	if (app_win > inet_csk(sk)->icsk_ack.rcv_mss)
-		app_win -= inet_csk(sk)->icsk_ack.rcv_mss;
+	if (app_win > icsk->icsk_ack.rcv_mss)
+		app_win -= icsk->icsk_ack.rcv_mss;
 	app_win = max(app_win, 2U*tp->advmss);
 
 	if (!ofo_win)
@@ -549,8 +550,10 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
  * To save cycles in the RFC 1323 implementation it was better to break
  * it up into three procedures. -- erics
  */
-static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
+static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	long m = mrtt; /* RTT */
 
 	/* The following amusing code comes from Jacobson's
@@ -610,8 +613,8 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
 		tp->rtt_seq = tp->snd_nxt;
 	}
 
-	if (tp->ca_ops->rtt_sample)
-		tp->ca_ops->rtt_sample(tp, *usrtt);
+	if (icsk->icsk_ca_ops->rtt_sample)
+		icsk->icsk_ca_ops->rtt_sample(sk, *usrtt);
 }
 
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
@@ -663,9 +666,10 @@ void tcp_update_metrics(struct sock *sk)
 	dst_confirm(dst);
 
 	if (dst && (dst->flags&DST_HOST)) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
 		int m;
 
-		if (inet_csk(sk)->icsk_backoff || !tp->srtt) {
+		if (icsk->icsk_backoff || !tp->srtt) {
 			/* This session failed to estimate rtt. Why?
 			 * Probably, no packets returned in time.
 			 * Reset our results.
@@ -714,7 +718,7 @@ void tcp_update_metrics(struct sock *sk)
 		    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
 			dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
 	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
-		   tp->ca_state == TCP_CA_Open) {
+		   icsk->icsk_ca_state == TCP_CA_Open) {
 		/* Cong. avoidance phase, cwnd is reliable. */
 		if (!dst_metric_locked(dst, RTAX_SSTHRESH))
 			dst->metrics[RTAX_SSTHRESH-1] =
@@ -828,8 +832,10 @@ reset:
 	}
 }
 
-static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
+static void tcp_update_reordering(struct sock *sk, const int metric,
+				  const int ts)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	if (metric > tp->reordering) {
 		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
@@ -844,7 +850,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
 #if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
-		       tp->rx_opt.sack_ok, tp->ca_state,
+		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
 		       tp->reordering,
 		       tp->fackets_out,
 		       tp->sacked_out,
@@ -906,6 +912,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
 	struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
@@ -1071,7 +1078,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			 * we have to account for reordering! Ugly,
 			 * but should help.
 			 */
-			if (lost_retrans && tp->ca_state == TCP_CA_Recovery) {
+			if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
 				struct sk_buff *skb;
 
 				sk_stream_for_retrans_queue(skb, sk) {
@@ -1100,8 +1107,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
 	tp->left_out = tp->sacked_out + tp->lost_out;
 
-	if ((reord < tp->fackets_out) && tp->ca_state != TCP_CA_Loss)
-		tcp_update_reordering(tp, ((tp->fackets_out + 1) - reord), 0);
+	if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
+		tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
 
 #if FASTRETRANS_DEBUG > 0
 	BUG_TRAP((int)tp->sacked_out >= 0);
@@ -1118,17 +1125,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
  */
 void tcp_enter_frto(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	tp->frto_counter = 1;
 
-	if (tp->ca_state <= TCP_CA_Disorder ||
+	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
 	    tp->snd_una == tp->high_seq ||
-	    (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
-		tp->prior_ssthresh = tcp_current_ssthresh(tp);
-		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
-		tcp_ca_event(tp, CA_EVENT_FRTO);
+	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+		tp->prior_ssthresh = tcp_current_ssthresh(sk);
+		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+		tcp_ca_event(sk, CA_EVENT_FRTO);
 	}
 
 	/* Have to clear retransmission markers here to keep the bookkeeping
@@ -1145,7 +1153,7 @@ void tcp_enter_frto(struct sock *sk)
 	}
 	tcp_sync_left_out(tp);
 
-	tcp_set_ca_state(tp, TCP_CA_Open);
+	tcp_set_ca_state(sk, TCP_CA_Open);
 	tp->frto_highmark = tp->snd_nxt;
 }
 
@@ -1191,7 +1199,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
-	tcp_set_ca_state(tp, TCP_CA_Loss);
+	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->frto_highmark;
 	TCP_ECN_queue_cwr(tp);
 }
@@ -1215,16 +1223,17 @@ void tcp_clear_retrans(struct tcp_sock *tp)
  */
 void tcp_enter_loss(struct sock *sk, int how)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int cnt = 0;
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
-	if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
-	    (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
-		tp->prior_ssthresh = tcp_current_ssthresh(tp);
-		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
-		tcp_ca_event(tp, CA_EVENT_LOSS);
+	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
+	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+		tp->prior_ssthresh = tcp_current_ssthresh(sk);
+		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+		tcp_ca_event(sk, CA_EVENT_LOSS);
 	}
 	tp->snd_cwnd	   = 1;
 	tp->snd_cwnd_cnt   = 0;
@@ -1255,7 +1264,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
-	tcp_set_ca_state(tp, TCP_CA_Loss);
+	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 }
@@ -1272,13 +1281,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
 	 */
 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
+		struct inet_connection_sock *icsk = inet_csk(sk);
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
-		inet_csk(sk)->icsk_retransmits++;
+		icsk->icsk_retransmits++;
 		tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+					  icsk->icsk_rto, TCP_RTO_MAX);
 		return 1;
 	}
 	return 0;
@@ -1431,8 +1441,9 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
  * in assumption of absent reordering, interpret this as reordering.
  * The only another reason could be bug in receiver TCP.
  */
-static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend)
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	u32 holes;
 
 	holes = max(tp->lost_out, 1U);
@@ -1440,16 +1451,17 @@ static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend)
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		tcp_update_reordering(tp, tp->packets_out+addend, 0);
+		tcp_update_reordering(sk, tp->packets_out + addend, 0);
 	}
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct tcp_sock *tp)
+static void tcp_add_reno_sack(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	tp->sacked_out++;
-	tcp_check_reno_reordering(tp, 0);
+	tcp_check_reno_reordering(sk, 0);
 	tcp_sync_left_out(tp);
 }
 
@@ -1464,7 +1476,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acke
 	else
 		tp->sacked_out -= acked-1;
 	}
-	tcp_check_reno_reordering(tp, acked);
+	tcp_check_reno_reordering(sk, acked);
 	tcp_sync_left_out(tp);
 }
 
@@ -1538,14 +1550,16 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 }
 
 /* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct tcp_sock *tp)
+static void tcp_cwnd_down(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
 	tp->snd_cwnd_cnt = decr&1;
 	decr >>= 1;
 
-	if (decr && tp->snd_cwnd > tp->ca_ops->min_cwnd(tp))
+	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
 		tp->snd_cwnd -= decr;
 
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
@@ -1579,11 +1593,15 @@ static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct tcp_sock *tp, int undo)
+static void tcp_undo_cwr(struct sock *sk, const int undo)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->prior_ssthresh) {
-		if (tp->ca_ops->undo_cwnd)
-			tp->snd_cwnd = tp->ca_ops->undo_cwnd(tp);
+		const struct inet_connection_sock *icsk = inet_csk(sk);
+
+		if (icsk->icsk_ca_ops->undo_cwnd)
+			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
 		else
 			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
 
@@ -1611,9 +1629,9 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
-		DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
-		tcp_undo_cwr(tp, 1);
-		if (tp->ca_state == TCP_CA_Loss)
+		DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+		tcp_undo_cwr(sk, 1);
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
 		else
 			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
@@ -1626,7 +1644,7 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 		tcp_moderate_cwnd(tp);
 		return 1;
 	}
-	tcp_set_ca_state(tp, TCP_CA_Open);
+	tcp_set_ca_state(sk, TCP_CA_Open);
 	return 0;
 }
 
@@ -1635,7 +1653,7 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
 {
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, tp, "D-SACK");
-		tcp_undo_cwr(tp, 1);
+		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
 	}
@@ -1656,10 +1674,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 		if (tp->retrans_out == 0)
 			tp->retrans_stamp = 0;
 
-		tcp_update_reordering(tp, tcp_fackets_out(tp)+acked, 1);
+		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
 		DBGUNDO(sk, tp, "Hoe");
-		tcp_undo_cwr(tp, 0);
+		tcp_undo_cwr(sk, 0);
 		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
@@ -1682,22 +1700,23 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 		DBGUNDO(sk, tp, "partial loss");
 		tp->lost_out = 0;
 		tp->left_out = tp->sacked_out;
-		tcp_undo_cwr(tp, 1);
+		tcp_undo_cwr(sk, 1);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (!IsReno(tp))
-			tcp_set_ca_state(tp, TCP_CA_Open);
+			tcp_set_ca_state(sk, TCP_CA_Open);
 		return 1;
 	}
 	return 0;
 }
 
-static inline void tcp_complete_cwr(struct tcp_sock *tp)
+static inline void tcp_complete_cwr(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
-	tcp_ca_event(tp, CA_EVENT_COMPLETE_CWR);
+	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
 static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
@@ -1708,21 +1727,21 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
 		tp->retrans_stamp = 0;
 
 	if (flag&FLAG_ECE)
-		tcp_enter_cwr(tp);
+		tcp_enter_cwr(sk);
 
-	if (tp->ca_state != TCP_CA_CWR) {
+	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		int state = TCP_CA_Open;
 
 		if (tp->left_out || tp->retrans_out || tp->undo_marker)
 			state = TCP_CA_Disorder;
 
-		if (tp->ca_state != state) {
-			tcp_set_ca_state(tp, state);
+		if (inet_csk(sk)->icsk_ca_state != state) {
+			tcp_set_ca_state(sk, state);
 			tp->high_seq = tp->snd_nxt;
 		}
 		tcp_moderate_cwnd(tp);
 	} else {
-		tcp_cwnd_down(tp);
+		tcp_cwnd_down(sk);
 	}
 }
 
@@ -1741,6 +1760,7 @@ static void
 tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		      int prior_packets, int flag)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
 
@@ -1764,7 +1784,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	/* C. Process data loss notification, provided it is valid. */
 	if ((flag&FLAG_DATA_LOST) &&
 	    before(tp->snd_una, tp->high_seq) &&
-	    tp->ca_state != TCP_CA_Open &&
+	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
@@ -1775,14 +1795,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 
 	/* E. Check state exit conditions. State can be terminated
 	 * when high_seq is ACKed. */
-	if (tp->ca_state == TCP_CA_Open) {
+	if (icsk->icsk_ca_state == TCP_CA_Open) {
 		if (!sysctl_tcp_frto)
 			BUG_TRAP(tp->retrans_out == 0);
 		tp->retrans_stamp = 0;
 	} else if (!before(tp->snd_una, tp->high_seq)) {
-		switch (tp->ca_state) {
+		switch (icsk->icsk_ca_state) {
 		case TCP_CA_Loss:
-			inet_csk(sk)->icsk_retransmits = 0;
+			icsk->icsk_retransmits = 0;
 			if (tcp_try_undo_recovery(sk, tp))
 				return;
 			break;
@@ -1791,8 +1811,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			/* CWR is to be held something *above* high_seq
 			 * is ACKed for CWR bit to reach receiver. */
 			if (tp->snd_una != tp->high_seq) {
-				tcp_complete_cwr(tp);
-				tcp_set_ca_state(tp, TCP_CA_Open);
+				tcp_complete_cwr(sk);
+				tcp_set_ca_state(sk, TCP_CA_Open);
 			}
 			break;
 
@@ -1803,7 +1823,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			 * catching for all duplicate ACKs. */
 			    IsReno(tp) || tp->snd_una != tp->high_seq) {
 				tp->undo_marker = 0;
-				tcp_set_ca_state(tp, TCP_CA_Open);
+				tcp_set_ca_state(sk, TCP_CA_Open);
 			}
 			break;
 
@@ -1812,17 +1832,17 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			tcp_reset_reno_sack(tp);
 			if (tcp_try_undo_recovery(sk, tp))
 				return;
-			tcp_complete_cwr(tp);
+			tcp_complete_cwr(sk);
 			break;
 		}
 	}
 
 	/* F. Process state. */
-	switch (tp->ca_state) {
+	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (prior_snd_una == tp->snd_una) {
 			if (IsReno(tp) && is_dupack)
-				tcp_add_reno_sack(tp);
+				tcp_add_reno_sack(sk);
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
@@ -1832,13 +1852,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
-			inet_csk(sk)->icsk_retransmits = 0;
+			icsk->icsk_retransmits = 0;
 		if (!tcp_try_undo_loss(sk, tp)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
 			return;
 		}
-		if (tp->ca_state != TCP_CA_Open)
+		if (icsk->icsk_ca_state != TCP_CA_Open)
 			return;
 		/* Loss is undone; fall through to processing in Open state. */
 	default:
@@ -1846,10 +1866,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			if (tp->snd_una != prior_snd_una)
 				tcp_reset_reno_sack(tp);
 			if (is_dupack)
-				tcp_add_reno_sack(tp);
+				tcp_add_reno_sack(sk);
 		}
 
-	if (tp->ca_state == TCP_CA_Disorder)
+	if (icsk->icsk_ca_state == TCP_CA_Disorder)
 		tcp_try_undo_dsack(sk, tp);
 
 	if (!tcp_time_to_recover(sk, tp)) {
@@ -1869,20 +1889,20 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tp->undo_marker = tp->snd_una;
 		tp->undo_retrans = tp->retrans_out;
 
-		if (tp->ca_state < TCP_CA_CWR) {
+		if (icsk->icsk_ca_state < TCP_CA_CWR) {
 			if (!(flag&FLAG_ECE))
-				tp->prior_ssthresh = tcp_current_ssthresh(tp);
-			tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
+				tp->prior_ssthresh = tcp_current_ssthresh(sk);
+			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 			TCP_ECN_queue_cwr(tp);
 		}
 
 		tp->snd_cwnd_cnt = 0;
-		tcp_set_ca_state(tp, TCP_CA_Recovery);
+		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
 	if (is_dupack || tcp_head_timedout(sk, tp))
 		tcp_update_scoreboard(sk, tp);
-	tcp_cwnd_down(tp);
+	tcp_cwnd_down(sk);
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -1908,7 +1928,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
 	 */
 	struct tcp_sock *tp = tcp_sk(sk);
 	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-	tcp_rtt_estimator(tp, seq_rtt, usrtt);
+	tcp_rtt_estimator(sk, seq_rtt, usrtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 	tcp_bound_rto(sk);
@@ -1928,7 +1948,7 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag
 	if (flag & FLAG_RETRANS_DATA_ACKED)
 		return;
 
-	tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt);
+	tcp_rtt_estimator(sk, seq_rtt, usrtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 	tcp_bound_rto(sk);
@@ -1945,11 +1965,12 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
 		tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
 }
 
-static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
+static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 				  u32 in_flight, int good)
 {
-	tp->ca_ops->cong_avoid(tp, ack, rtt, in_flight, good);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 /* Restart timer after forward progress on connection.
@@ -2098,11 +2119,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 	}
 
 	if (acked&FLAG_ACKED) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
 		tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
 		tcp_ack_packets_out(sk, tp);
 
-		if (tp->ca_ops->pkts_acked)
-			tp->ca_ops->pkts_acked(tp, pkts_acked);
+		if (icsk->icsk_ca_ops->pkts_acked)
+			icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
 	}
 
 #if FASTRETRANS_DEBUG > 0
@@ -2110,19 +2132,20 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 	BUG_TRAP((int)tp->lost_out >= 0);
 	BUG_TRAP((int)tp->retrans_out >= 0);
 	if (!tp->packets_out && tp->rx_opt.sack_ok) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
 		if (tp->lost_out) {
 			printk(KERN_DEBUG "Leak l=%u %d\n",
-			       tp->lost_out, tp->ca_state);
+			       tp->lost_out, icsk->icsk_ca_state);
 			tp->lost_out = 0;
 		}
 		if (tp->sacked_out) {
 			printk(KERN_DEBUG "Leak s=%u %d\n",
-			       tp->sacked_out, tp->ca_state);
+			       tp->sacked_out, icsk->icsk_ca_state);
 			tp->sacked_out = 0;
 		}
 		if (tp->retrans_out) {
 			printk(KERN_DEBUG "Leak r=%u %d\n",
-			       tp->retrans_out, tp->ca_state);
+			       tp->retrans_out, icsk->icsk_ca_state);
 			tp->retrans_out = 0;
 		}
 	}
@@ -2152,16 +2175,17 @@ static void tcp_ack_probe(struct sock *sk)
 	}
 }
 
-static inline int tcp_ack_is_dubious(struct tcp_sock *tp, int flag)
+static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
 {
 	return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
-		tp->ca_state != TCP_CA_Open);
+		inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
 }
 
-static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag)
+static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
-		!((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR));
+		!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
 }
 
 /* Check that window update is acceptable.
@@ -2251,6 +2275,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 prior_snd_una = tp->snd_una;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
@@ -2278,7 +2303,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		tp->snd_una = ack;
 		flag |= FLAG_WIN_UPDATE;
 
-		tcp_ca_event(tp, CA_EVENT_FAST_ACK);
+		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
 		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
 	} else {
@@ -2295,7 +2320,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
 			flag |= FLAG_ECE;
 
-		tcp_ca_event(tp, CA_EVENT_SLOW_ACK);
+		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
 	}
 
 	/* We passed data and got it acked, remove any soft error
@@ -2311,19 +2336,19 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt,
-				    tp->ca_ops->rtt_sample ? &seq_usrtt : NULL);
+				    icsk->icsk_ca_ops->rtt_sample ? &seq_usrtt : NULL);
 
 	if (tp->frto_counter)
 		tcp_process_frto(sk, prior_snd_una);
 
-	if (tcp_ack_is_dubious(tp, flag)) {
+	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advanve CWND, if state allows this. */
-		if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(tp, flag))
-			tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 0);
+		if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
+			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED))
-			tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 1);
+			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
 	}
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
@@ -2332,7 +2357,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	return 1;
 
 no_queue:
-	tp->probes_out = 0;
+	icsk->icsk_probes_out = 0;
 
 	/* If this ack opens up a zero window, clear backoff.  It was
 	 * being used to time the probes, and is probably far higher than
@@ -3301,12 +3326,12 @@ void tcp_cwnd_application_limited(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tp->ca_state == TCP_CA_Open &&
+	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 		/* Limited by application or receiver window. */
 		u32 win_used = max(tp->snd_cwnd_used, 2U);
 		if (win_used < tp->snd_cwnd) {
-			tp->snd_ssthresh = tcp_current_ssthresh(tp);
+			tp->snd_ssthresh = tcp_current_ssthresh(sk);
 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
 		}
 		tp->snd_cwnd_used = 0;
@@ -3935,7 +3960,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
 		tcp_init_metrics(sk);
 
-		tcp_init_congestion_control(tp);
+		tcp_init_congestion_control(sk);
 
 		/* Prevent spurious tcp_cwnd_restart() on first data
 		 * packet.
@@ -4212,7 +4237,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 			tcp_init_metrics(sk);
 
-			tcp_init_congestion_control(tp);
+			tcp_init_congestion_control(sk);
 
 			/* Prevent spurious tcp_cwnd_restart() on
 			 * first data packet.
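For orientation, here is a hedged sketch of the congestion-control hook table as the call sites above imply it looks after this change. The field names (ssthresh, min_cwnd, cong_avoid, rtt_sample, pkts_acked, undo_cwnd) are taken from the icsk_ca_ops dereferences in the diff; the struct name tcp_congestion_ops and the exact member layout are assumptions based on the TCP CA infrastructure this changeset builds on, not part of this patch:

	/* Sketch only: hook signatures inferred from the call sites above. */
	struct tcp_congestion_ops {
		u32  (*ssthresh)(struct sock *sk);
		u32  (*min_cwnd)(struct sock *sk);
		void (*cong_avoid)(struct sock *sk, u32 ack, u32 rtt,
				   u32 in_flight, int good);
		void (*rtt_sample)(struct sock *sk, u32 usrtt);
		void (*pkts_acked)(struct sock *sk, u32 num_acked);
		u32  (*undo_cwnd)(struct sock *sk);
		/* ... plus event hooks reached via tcp_ca_event(sk, CA_EVENT_*)
		 * as seen in tcp_enter_frto(), tcp_enter_loss(), etc. above.
		 */
	};

The design point visible throughout the diff: every hook now receives the generic struct sock * rather than struct tcp_sock *, which is what lets another connection-oriented INET protocol such as DCCP plug into the same congestion-avoidance modules.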