Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/inet_connection_sock.c |   3
-rw-r--r--  net/ipv4/tcp.c                  |  12
-rw-r--r--  net/ipv4/tcp_bic.c              |  46
-rw-r--r--  net/ipv4/tcp_cong.c             |  44
-rw-r--r--  net/ipv4/tcp_diag.c             |  16
-rw-r--r--  net/ipv4/tcp_highspeed.c        |  17
-rw-r--r--  net/ipv4/tcp_htcp.c             |  53
-rw-r--r--  net/ipv4/tcp_hybla.c            |  31
-rw-r--r--  net/ipv4/tcp_input.c            | 223
-rw-r--r--  net/ipv4/tcp_ipv4.c             |   9
-rw-r--r--  net/ipv4/tcp_minisocks.c        |   5
-rw-r--r--  net/ipv4/tcp_output.c           |  36
-rw-r--r--  net/ipv4/tcp_scalable.c         |   6
-rw-r--r--  net/ipv4/tcp_timer.c            |  26
-rw-r--r--  net/ipv4/tcp_vegas.c            |  44
-rw-r--r--  net/ipv4/tcp_westwood.c         |  58
16 files changed, 348 insertions(+), 281 deletions(-)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 026630a15ea0..fe3c6d3d0c91 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -508,7 +508,8 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
 	newsk->sk_write_space = sk_stream_write_space;
 
 	newicsk->icsk_retransmits = 0;
 	newicsk->icsk_backoff = 0;
+	newicsk->icsk_probes_out = 0;
 
 	/* Deinitialize accept_queue to trap illegal accesses. */
 	memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0eed64a1991d..02848e72e9c1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1671,11 +1671,11 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->write_seq = 1;
 	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
-	tp->probes_out = 0;
+	icsk->icsk_probes_out = 0;
 	tp->packets_out = 0;
 	tp->snd_ssthresh = 0x7fffffff;
 	tp->snd_cwnd_cnt = 0;
-	tcp_set_ca_state(tp, TCP_CA_Open);
+	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
 	inet_csk_delack_init(sk);
 	sk->sk_send_head = NULL;
@@ -1718,7 +1718,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		name[val] = 0;
 
 		lock_sock(sk);
-		err = tcp_set_congestion_control(tp, name);
+		err = tcp_set_congestion_control(sk, name);
 		release_sock(sk);
 		return err;
 	}
@@ -1886,9 +1886,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	memset(info, 0, sizeof(*info));
 
 	info->tcpi_state = sk->sk_state;
-	info->tcpi_ca_state = tp->ca_state;
+	info->tcpi_ca_state = icsk->icsk_ca_state;
 	info->tcpi_retransmits = icsk->icsk_retransmits;
-	info->tcpi_probes = tp->probes_out;
+	info->tcpi_probes = icsk->icsk_probes_out;
 	info->tcpi_backoff = icsk->icsk_backoff;
 
 	if (tp->rx_opt.tstamp_ok)
@@ -2016,7 +2016,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
 		if (put_user(len, optlen))
 			return -EFAULT;
-		if (copy_to_user(optval, tp->ca_ops->name, len))
+		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
 			return -EFAULT;
 		return 0;
 	default:
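The tcp_setsockopt()/tcp_getsockopt() hunks above are the kernel side of the TCP_CONGESTION socket option. From userspace the interaction looks roughly like this (a sketch; TCP_CONGESTION may need to be defined by hand with libc headers of this era):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_CONGESTION
#define TCP_CONGESTION 13	/* not present in all libc headers */
#endif

int main(void)
{
	char name[16];		/* TCP_CA_NAME_MAX */
	socklen_t len = sizeof(name);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* tcp_getsockopt() copies icsk_ca_ops->name back to userspace */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("current: %.*s\n", (int)len, name);

	/* tcp_setsockopt() resolves the name via tcp_set_congestion_control() */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bic", 3) < 0)
		perror("TCP_CONGESTION");
	return 0;
}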
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index ec38d45d6649..b940346de4e7 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -86,11 +86,11 @@ static inline void bictcp_reset(struct bictcp *ca)
 	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
 }
 
-static void bictcp_init(struct tcp_sock *tp)
+static void bictcp_init(struct sock *sk)
 {
-	bictcp_reset(tcp_ca(tp));
+	bictcp_reset(inet_csk_ca(sk));
 	if (initial_ssthresh)
-		tp->snd_ssthresh = initial_ssthresh;
+		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
 
 /*
@@ -156,9 +156,10 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 
 
 /* Detect low utilization in congestion avoidance */
-static inline void bictcp_low_utilization(struct tcp_sock *tp, int flag)
+static inline void bictcp_low_utilization(struct sock *sk, int flag)
 {
-	struct bictcp *ca = tcp_ca(tp);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct bictcp *ca = inet_csk_ca(sk);
 	u32 dist, delay;
 
 	/* No time stamp */
@@ -208,12 +209,13 @@ static inline void bictcp_low_utilization(struct tcp_sock *tp, int flag)
 
 }
 
-static void bictcp_cong_avoid(struct tcp_sock *tp, u32 ack,
+static void bictcp_cong_avoid(struct sock *sk, u32 ack,
 			      u32 seq_rtt, u32 in_flight, int data_acked)
 {
-	struct bictcp *ca = tcp_ca(tp);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bictcp *ca = inet_csk_ca(sk);
 
-	bictcp_low_utilization(tp, data_acked);
+	bictcp_low_utilization(sk, data_acked);
 
 	if (in_flight < tp->snd_cwnd)
 		return;
@@ -242,9 +244,10 @@ static void bictcp_cong_avoid(struct tcp_sock *tp, u32 ack,
  * behave like Reno until low_window is reached,
  * then increase congestion window slowly
  */
-static u32 bictcp_recalc_ssthresh(struct tcp_sock *tp)
+static u32 bictcp_recalc_ssthresh(struct sock *sk)
 {
-	struct bictcp *ca = tcp_ca(tp);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct bictcp *ca = inet_csk_ca(sk);
 
 	ca->epoch_start = 0;	/* end of epoch */
 
@@ -269,31 +272,34 @@ static u32 bictcp_recalc_ssthresh(struct tcp_sock *tp)
 	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
-static u32 bictcp_undo_cwnd(struct tcp_sock *tp)
+static u32 bictcp_undo_cwnd(struct sock *sk)
 {
-	struct bictcp *ca = tcp_ca(tp);
-
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct bictcp *ca = inet_csk_ca(sk);
 	return max(tp->snd_cwnd, ca->last_max_cwnd);
 }
 
-static u32 bictcp_min_cwnd(struct tcp_sock *tp)
+static u32 bictcp_min_cwnd(struct sock *sk)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	return tp->snd_ssthresh;
 }
 
-static void bictcp_state(struct tcp_sock *tp, u8 new_state)
+static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss)
-		bictcp_reset(tcp_ca(tp));
+		bictcp_reset(inet_csk_ca(sk));
 }
 
 /* Track delayed acknowledgement ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct tcp_sock *tp, u32 cnt)
+static void bictcp_acked(struct sock *sk, u32 cnt)
 {
-	if (cnt > 0 && tp->ca_state == TCP_CA_Open) {
-		struct bictcp *ca = tcp_ca(tp);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
+		struct bictcp *ca = inet_csk_ca(sk);
 		cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
 		ca->delayed_ack += cnt;
 	}
@@ -314,7 +320,7 @@ static struct tcp_congestion_ops bictcp = {
 
 static int __init bictcp_register(void)
 {
-	BUG_ON(sizeof(struct bictcp) > TCP_CA_PRIV_SIZE);
+	BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
 	return tcp_register_congestion_control(&bictcp);
 }
 
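The tcp_ca(tp) to inet_csk_ca(sk) and TCP_CA_PRIV_SIZE to ICSK_CA_PRIV_SIZE substitutions above follow the per-module private scratch area into struct inet_connection_sock. Roughly, paraphrased from memory of include/net/inet_connection_sock.h rather than from this hunk:

#define ICSK_CA_PRIV_SIZE	(16 * sizeof(u32))

struct inet_connection_sock {
	/* ... */
	const struct tcp_congestion_ops *icsk_ca_ops;
	__u8	icsk_ca_state;
	u32	icsk_ca_priv[ICSK_CA_PRIV_SIZE / sizeof(u32)];
};

/* Each module overlays its own struct on the scratch area, hence the
 * BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE) check at register time. */
static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}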
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4970d10a7785..bbf2d6624e89 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -73,33 +73,36 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
 
 /* Assign choice of congestion control. */
-void tcp_init_congestion_control(struct tcp_sock *tp)
+void tcp_init_congestion_control(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_congestion_ops *ca;
 
-	if (tp->ca_ops != &tcp_init_congestion_ops)
+	if (icsk->icsk_ca_ops != &tcp_init_congestion_ops)
 		return;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
 		if (try_module_get(ca->owner)) {
-			tp->ca_ops = ca;
+			icsk->icsk_ca_ops = ca;
 			break;
 		}
 
 	}
 	rcu_read_unlock();
 
-	if (tp->ca_ops->init)
-		tp->ca_ops->init(tp);
+	if (icsk->icsk_ca_ops->init)
+		icsk->icsk_ca_ops->init(sk);
 }
 
 /* Manage refcounts on socket close. */
-void tcp_cleanup_congestion_control(struct tcp_sock *tp)
+void tcp_cleanup_congestion_control(struct sock *sk)
 {
-	if (tp->ca_ops->release)
-		tp->ca_ops->release(tp);
-	module_put(tp->ca_ops->owner);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (icsk->icsk_ca_ops->release)
+		icsk->icsk_ca_ops->release(sk);
+	module_put(icsk->icsk_ca_ops->owner);
 }
 
 /* Used by sysctl to change default congestion control */
@@ -143,14 +146,15 @@ void tcp_get_default_congestion_control(char *name)
 }
 
 /* Change congestion control for socket */
-int tcp_set_congestion_control(struct tcp_sock *tp, const char *name)
+int tcp_set_congestion_control(struct sock *sk, const char *name)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_congestion_ops *ca;
 	int err = 0;
 
 	rcu_read_lock();
 	ca = tcp_ca_find(name);
-	if (ca == tp->ca_ops)
+	if (ca == icsk->icsk_ca_ops)
 		goto out;
 
 	if (!ca)
@@ -160,10 +164,10 @@ int tcp_set_congestion_control(struct tcp_sock *tp, const char *name)
 		err = -EBUSY;
 
 	else {
-		tcp_cleanup_congestion_control(tp);
-		tp->ca_ops = ca;
-		if (tp->ca_ops->init)
-			tp->ca_ops->init(tp);
+		tcp_cleanup_congestion_control(sk);
+		icsk->icsk_ca_ops = ca;
+		if (icsk->icsk_ca_ops->init)
+			icsk->icsk_ca_ops->init(sk);
 	}
  out:
 	rcu_read_unlock();
@@ -177,9 +181,11 @@ int tcp_set_congestion_control(struct tcp_sock *tp, const char *name)
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight,
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
 			 int flag)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (in_flight < tp->snd_cwnd)
 		return;
 
@@ -202,15 +208,17 @@ void tcp_reno_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, u32 in_flight,
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
 /* Slow start threshold is half the congestion window (min 2) */
-u32 tcp_reno_ssthresh(struct tcp_sock *tp)
+u32 tcp_reno_ssthresh(struct sock *sk)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	return max(tp->snd_cwnd >> 1U, 2U);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
 /* Lower bound on congestion window. */
-u32 tcp_reno_min_cwnd(struct tcp_sock *tp)
+u32 tcp_reno_min_cwnd(struct sock *sk)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	return tp->snd_ssthresh/2;
 }
 EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
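Putting the new tcp_cong.c API together: a minimal, hypothetical congestion-control module written against the post-patch hooks. "toy" is an invented name for illustration; it only uses symbols registered or exported in the hunks above:

#include <linux/module.h>
#include <net/tcp.h>

static u32 toy_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Halve cwnd on loss, same policy as tcp_reno_ssthresh() above */
	return max(tp->snd_cwnd >> 1U, 2U);
}

static void toy_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			   u32 in_flight, int good)
{
	/* Delegate to the Reno behaviour exported by tcp_cong.c */
	tcp_reno_cong_avoid(sk, ack, rtt, in_flight, good);
}

static struct tcp_congestion_ops tcp_toy = {
	.ssthresh	= toy_ssthresh,
	.cong_avoid	= toy_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
	.owner		= THIS_MODULE,
	.name		= "toy",
};

static int __init toy_register(void)
{
	return tcp_register_congestion_control(&tcp_toy);
}

static void __exit toy_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_toy);
}

module_init(toy_register);
module_exit(toy_unregister);
MODULE_LICENSE("GPL");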
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 5f4c74f45e82..4288ecfec9a7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -66,10 +66,10 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 		if (ext & (1<<(TCPDIAG_INFO-1)))
 			info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info));
 
-		if (ext & (1<<(TCPDIAG_CONG-1))) {
-			size_t len = strlen(tp->ca_ops->name);
+		if ((ext & (1 << (TCPDIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+			size_t len = strlen(icsk->icsk_ca_ops->name);
 			strcpy(TCPDIAG_PUT(skb, TCPDIAG_CONG, len+1),
-			       tp->ca_ops->name);
+			       icsk->icsk_ca_ops->name);
 		}
 	}
 	r->tcpdiag_family = sk->sk_family;
@@ -136,18 +136,17 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 		r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
 		r->tcpdiag_timer = 4;
-		r->tcpdiag_retrans = tp->probes_out;
+		r->tcpdiag_retrans = icsk->icsk_probes_out;
 		r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
 	} else if (timer_pending(&sk->sk_timer)) {
 		r->tcpdiag_timer = 2;
-		r->tcpdiag_retrans = tp->probes_out;
+		r->tcpdiag_retrans = icsk->icsk_probes_out;
 		r->tcpdiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
 	} else {
 		r->tcpdiag_timer = 0;
 		r->tcpdiag_expires = 0;
 	}
 #undef EXPIRES_IN_MS
-
 	r->tcpdiag_rqueue = tp->rcv_nxt - tp->copied_seq;
 	r->tcpdiag_wqueue = tp->write_seq - tp->snd_una;
 	r->tcpdiag_uid = sock_i_uid(sk);
@@ -163,8 +162,9 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 	if (info)
 		tcp_get_info(sk, info);
 
-	if (sk->sk_state < TCP_TIME_WAIT && tp->ca_ops->get_info)
-		tp->ca_ops->get_info(tp, ext, skb);
+	if (sk->sk_state < TCP_TIME_WAIT &&
+	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
+		icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
 	nlh->nlmsg_len = skb->tail - b;
 	return skb->len;
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 36c51f8136bf..6acc04bde080 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -98,9 +98,10 @@ struct hstcp {
 	u32	ai;
 };
 
-static void hstcp_init(struct tcp_sock *tp)
+static void hstcp_init(struct sock *sk)
 {
-	struct hstcp *ca = tcp_ca(tp);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct hstcp *ca = inet_csk_ca(sk);
 
 	ca->ai = 0;
 
@@ -109,10 +110,11 @@ static void hstcp_init(struct tcp_sock *tp)
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct tcp_sock *tp, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 			     u32 in_flight, int good)
 {
-	struct hstcp *ca = tcp_ca(tp);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct hstcp *ca = inet_csk_ca(sk);
 
 	if (in_flight < tp->snd_cwnd)
 		return;
@@ -143,9 +145,10 @@ static void hstcp_cong_avoid(struct tcp_sock *tp, u32 adk, u32 rtt,
 	}
 }
 
-static u32 hstcp_ssthresh(struct tcp_sock *tp)
+static u32 hstcp_ssthresh(struct sock *sk)
 {
-	struct hstcp *ca = tcp_ca(tp);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct hstcp *ca = inet_csk_ca(sk);
 
 	/* Do multiplicative decrease */
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
@@ -164,7 +167,7 @@ static struct tcp_congestion_ops tcp_highspeed = {
 
 static int __init hstcp_register(void)
 {
-	BUG_ON(sizeof(struct hstcp) > TCP_CA_PRIV_SIZE);
+	BUG_ON(sizeof(struct hstcp) > ICSK_CA_PRIV_SIZE);
 	return tcp_register_congestion_control(&tcp_highspeed);
 }
 
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 40168275acf9..e47b37984e95 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -55,18 +55,21 @@ static inline void htcp_reset(struct htcp *ca)
 	ca->snd_cwnd_cnt2 = 0;
 }
 
-static u32 htcp_cwnd_undo(struct tcp_sock *tp)
+static u32 htcp_cwnd_undo(struct sock *sk)
 {
-	struct htcp *ca = tcp_ca(tp);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct htcp *ca = inet_csk_ca(sk);
 	ca->ccount = ca->undo_ccount;
 	ca->maxRTT = ca->undo_maxRTT;
 	ca->old_maxB = ca->undo_old_maxB;
 	return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta);
 }
 
-static inline void measure_rtt(struct tcp_sock *tp)
+static inline void measure_rtt(struct sock *sk)
 {
-	struct htcp *ca = tcp_ca(tp);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct htcp *ca = inet_csk_ca(sk);
 	u32 srtt = tp->srtt>>3;
 
 	/* keep track of minimum RTT seen so far, minRTT is zero at first */
@@ -74,7 +77,7 @@ static inline void measure_rtt(struct tcp_sock *tp)
 		ca->minRTT = srtt;
 
 	/* max RTT */
-	if (tp->ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) {
+	if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && ca->ccount > 3) {
 		if (ca->maxRTT < ca->minRTT)
 			ca->maxRTT = ca->minRTT;
 		if (ca->maxRTT < srtt && srtt <= ca->maxRTT+HZ/50)
@@ -82,13 +85,16 @@ static inline void measure_rtt(struct tcp_sock *tp)
 	}
 }
 
-static void measure_achieved_throughput(struct tcp_sock *tp, u32 pkts_acked)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
 {
-	struct htcp *ca = tcp_ca(tp);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	struct htcp *ca = inet_csk_ca(sk);
 	u32 now = tcp_time_stamp;
 
 	/* achieved throughput calculations */
-	if (tp->ca_state != TCP_CA_Open && tp->ca_state != TCP_CA_Disorder) {
+	if (icsk->icsk_ca_state != TCP_CA_Open &&
+	    icsk->icsk_ca_state != TCP_CA_Disorder) {
 		ca->packetcount = 0;
 		ca->lasttime = now;
 		return;
@@ -173,9 +179,9 @@ static inline void htcp_alpha_update(struct htcp *ca)
  * that point do we really have a real sense of maxRTT (the queues en route
  * were getting just too full now).
  */
-static void htcp_param_update(struct tcp_sock *tp)
+static void htcp_param_update(struct sock *sk)
 {
-	struct htcp *ca = tcp_ca(tp);
+	struct htcp *ca = inet_csk_ca(sk);
 	u32 minRTT = ca->minRTT;
 	u32 maxRTT = ca->maxRTT;
 
@@ -187,17 +193,19 @@ static void htcp_param_update(struct tcp_sock *tp)
 	ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
 }
 
-static u32 htcp_recalc_ssthresh(struct tcp_sock *tp)
+static u32 htcp_recalc_ssthresh(struct sock *sk)
 {
-	struct htcp *ca = tcp_ca(tp);
-	htcp_param_update(tp);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct htcp *ca = inet_csk_ca(sk);
+	htcp_param_update(sk);
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 		            u32 in_flight, int data_acked)
 {
-	struct htcp *ca = tcp_ca(tp);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct htcp *ca = inet_csk_ca(sk);
 
 	if (in_flight < tp->snd_cwnd)
 		return;
@@ -207,7 +215,7 @@ static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
 		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
 			tp->snd_cwnd++;
 	} else {
-		measure_rtt(tp);
+		measure_rtt(sk);
 
 		/* keep track of number of round-trip times since last backoff event */
 		if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) {
@@ -229,28 +237,29 @@ static void htcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
 }
 
 /* Lower bound on congestion window. */
-static u32 htcp_min_cwnd(struct tcp_sock *tp)
+static u32 htcp_min_cwnd(struct sock *sk)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
 	return tp->snd_ssthresh;
 }
 
 
-static void htcp_init(struct tcp_sock *tp)
+static void htcp_init(struct sock *sk)
 {
-	struct htcp *ca = tcp_ca(tp);
+	struct htcp *ca = inet_csk_ca(sk);
 
 	memset(ca, 0, sizeof(struct htcp));
 	ca->alpha = ALPHA_BASE;
 	ca->beta = BETA_MIN;
 }
 
-static void htcp_state(struct tcp_sock *tp, u8 new_state)
+static void htcp_state(struct sock *sk, u8 new_state)
 {
 	switch (new_state) {
 	case TCP_CA_CWR:
 	case TCP_CA_Recovery:
 	case TCP_CA_Loss:
-		htcp_reset(tcp_ca(tp));
+		htcp_reset(inet_csk_ca(sk));
 		break;
 	}
 }
@@ -269,7 +278,7 @@ static struct tcp_congestion_ops htcp = {
 
 static int __init htcp_register(void)
 {
-	BUG_ON(sizeof(struct htcp) > TCP_CA_PRIV_SIZE);
+	BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE);
 	BUILD_BUG_ON(BETA_MIN >= BETA_MAX);
 	if (!use_bandwidth_switch)
 		htcp.pkts_acked = NULL;
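bictcp_state() and htcp_state() above show the shape of the converted set_state hook: the stack now passes the socket, and the module digs its scratch state out with inet_csk_ca(). A minimal hypothetical example ("myca" is an invented name, not part of this diff):

struct myca {
	u32 epoch_start;	/* hypothetical per-socket scratch state */
};

static void myca_state(struct sock *sk, u8 new_state)
{
	/* Reset private state when the sender enters loss recovery,
	 * mirroring what bictcp_reset()/htcp_reset() do above. */
	if (new_state == TCP_CA_Loss) {
		struct myca *ca = inet_csk_ca(sk);
		ca->epoch_start = 0;
	}
}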
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 13a66342c304..77add63623df 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -33,19 +33,20 @@ MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
 
 
 /* This is called to refresh values for hybla parameters */
-static inline void hybla_recalc_param (struct tcp_sock *tp)
+static inline void hybla_recalc_param (struct sock *sk)
 {
-	struct hybla *ca = tcp_ca(tp);
+	struct hybla *ca = inet_csk_ca(sk);
 
-	ca->rho_3ls = max_t(u32, tp->srtt / msecs_to_jiffies(rtt0), 8);
+	ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
 	ca->rho = ca->rho_3ls >> 3;
 	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
 	ca->rho2 = ca->rho2_7ls >>7;
 }
 
-static void hybla_init(struct tcp_sock *tp)
+static void hybla_init(struct sock *sk)
 {
-	struct hybla *ca = tcp_ca(tp);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct hybla *ca = inet_csk_ca(sk);
 
 	ca->rho = 0;
 	ca->rho2 = 0;
@@ -57,17 +58,16 @@ static void hybla_init(struct tcp_sock *tp)
 	tp->snd_cwnd_clamp = 65535;
 
 	/* 1st Rho measurement based on initial srtt */
-	hybla_recalc_param(tp);
+	hybla_recalc_param(sk);
 
 	/* set minimum rtt as this is the 1st ever seen */
 	ca->minrtt = tp->srtt;
 	tp->snd_cwnd = ca->rho;
 }
 
-static void hybla_state(struct tcp_sock *tp, u8 ca_state)
+static void hybla_state(struct sock *sk, u8 ca_state)
 {
-	struct hybla *ca = tcp_ca(tp);
-
+	struct hybla *ca = inet_csk_ca(sk);
 	ca->hybla_en = (ca_state == TCP_CA_Open);
 }
 
@@ -86,27 +86,28 @@ static inline u32 hybla_fraction(u32 odds)
  * o Give cwnd a new value based on the model proposed
  * o remember increments <1
  */
-static void hybla_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 			    u32 in_flight, int flag)
 {
-	struct hybla *ca = tcp_ca(tp);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct hybla *ca = inet_csk_ca(sk);
 	u32 increment, odd, rho_fractions;
 	int is_slowstart = 0;
 
 	/* Recalculate rho only if this srtt is the lowest */
 	if (tp->srtt < ca->minrtt){
-		hybla_recalc_param(tp);
+		hybla_recalc_param(sk);
 		ca->minrtt = tp->srtt;
 	}
 
 	if (!ca->hybla_en)
-		return tcp_reno_cong_avoid(tp, ack, rtt, in_flight, flag);
+		return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
 
 	if (in_flight < tp->snd_cwnd)
 		return;
 
 	if (ca->rho == 0)
-		hybla_recalc_param(tp);
+		hybla_recalc_param(sk);
 
 	rho_fractions = ca->rho_3ls - (ca->rho << 3);
 
@@ -170,7 +171,7 @@ static struct tcp_congestion_ops tcp_hybla = {
 
 static int __init hybla_register(void)
 {
-	BUG_ON(sizeof(struct hybla) > TCP_CA_PRIV_SIZE);
+	BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
 	return tcp_register_congestion_control(&tcp_hybla);
 }
 
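The tcp_input.c hunks below convert every tcp_set_ca_state() and tcp_ca_event() call site from tp to sk. The helpers themselves live in include/net/tcp.h (changed by the same patch but outside this diffstat) and look roughly like:

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* let the module observe the transition before recording it */
	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}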
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 71d456148de7..fdd9547fb783 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -325,11 +325,12 @@ static void tcp_init_buffer_space(struct sock *sk)
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
 static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
 	int ofo_win = 0;
 
-	inet_csk(sk)->icsk_ack.quick = 0;
+	icsk->icsk_ack.quick = 0;
 
 	skb_queue_walk(&tp->out_of_order_queue, skb) {
 		ofo_win += skb->len;
@@ -350,8 +351,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 	app_win += ofo_win;
 	if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
 		app_win >>= 1;
-	if (app_win > inet_csk(sk)->icsk_ack.rcv_mss)
-		app_win -= inet_csk(sk)->icsk_ack.rcv_mss;
+	if (app_win > icsk->icsk_ack.rcv_mss)
+		app_win -= icsk->icsk_ack.rcv_mss;
 	app_win = max(app_win, 2U*tp->advmss);
 
 	if (!ofo_win)
@@ -549,8 +550,10 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
  * To save cycles in the RFC 1323 implementation it was better to break
  * it up into three procedures. -- erics
  */
-static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
+static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	long m = mrtt;	/* RTT */
 
 	/* The following amusing code comes from Jacobson's
@@ -610,8 +613,8 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
 		tp->rtt_seq = tp->snd_nxt;
 	}
 
-	if (tp->ca_ops->rtt_sample)
-		tp->ca_ops->rtt_sample(tp, *usrtt);
+	if (icsk->icsk_ca_ops->rtt_sample)
+		icsk->icsk_ca_ops->rtt_sample(sk, *usrtt);
 }
 
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
@@ -663,9 +666,10 @@ void tcp_update_metrics(struct sock *sk)
 	dst_confirm(dst);
 
 	if (dst && (dst->flags&DST_HOST)) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
 		int m;
 
-		if (inet_csk(sk)->icsk_backoff || !tp->srtt) {
+		if (icsk->icsk_backoff || !tp->srtt) {
 			/* This session failed to estimate rtt. Why?
 			 * Probably, no packets returned in time.
 			 * Reset our results.
@@ -714,7 +718,7 @@ void tcp_update_metrics(struct sock *sk)
 		    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
 			dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
 	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
-		   tp->ca_state == TCP_CA_Open) {
+		   icsk->icsk_ca_state == TCP_CA_Open) {
 		/* Cong. avoidance phase, cwnd is reliable. */
 		if (!dst_metric_locked(dst, RTAX_SSTHRESH))
 			dst->metrics[RTAX_SSTHRESH-1] =
@@ -828,8 +832,10 @@ reset:
 	}
 }
 
-static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
+static void tcp_update_reordering(struct sock *sk, const int metric,
+				  const int ts)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	if (metric > tp->reordering) {
 		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
@@ -844,7 +850,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
 #if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
-		       tp->rx_opt.sack_ok, tp->ca_state,
+		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
 		       tp->reordering,
 		       tp->fackets_out,
 		       tp->sacked_out,
@@ -906,6 +912,7 @@ static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts)
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
 	struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
@@ -1071,7 +1078,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			 * we have to account for reordering! Ugly,
 			 * but should help.
 			 */
-			if (lost_retrans && tp->ca_state == TCP_CA_Recovery) {
+			if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
 				struct sk_buff *skb;
 
 				sk_stream_for_retrans_queue(skb, sk) {
@@ -1100,8 +1107,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
 	tp->left_out = tp->sacked_out + tp->lost_out;
 
-	if ((reord < tp->fackets_out) && tp->ca_state != TCP_CA_Loss)
-		tcp_update_reordering(tp, ((tp->fackets_out + 1) - reord), 0);
+	if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
+		tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
 
 #if FASTRETRANS_DEBUG > 0
 	BUG_TRAP((int)tp->sacked_out >= 0);
@@ -1118,17 +1125,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
  */
 void tcp_enter_frto(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	tp->frto_counter = 1;
 
-	if (tp->ca_state <= TCP_CA_Disorder ||
+	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
 	    tp->snd_una == tp->high_seq ||
-	    (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
-		tp->prior_ssthresh = tcp_current_ssthresh(tp);
-		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
-		tcp_ca_event(tp, CA_EVENT_FRTO);
+	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+		tp->prior_ssthresh = tcp_current_ssthresh(sk);
+		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+		tcp_ca_event(sk, CA_EVENT_FRTO);
 	}
 
 	/* Have to clear retransmission markers here to keep the bookkeeping
@@ -1145,7 +1153,7 @@ void tcp_enter_frto(struct sock *sk)
 	}
 	tcp_sync_left_out(tp);
 
-	tcp_set_ca_state(tp, TCP_CA_Open);
+	tcp_set_ca_state(sk, TCP_CA_Open);
 	tp->frto_highmark = tp->snd_nxt;
 }
 
@@ -1191,7 +1199,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
-	tcp_set_ca_state(tp, TCP_CA_Loss);
+	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->frto_highmark;
 	TCP_ECN_queue_cwr(tp);
 }
@@ -1215,16 +1223,17 @@ void tcp_clear_retrans(struct tcp_sock *tp)
  */
 void tcp_enter_loss(struct sock *sk, int how)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int cnt = 0;
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
-	if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
-	    (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
-		tp->prior_ssthresh = tcp_current_ssthresh(tp);
-		tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
-		tcp_ca_event(tp, CA_EVENT_LOSS);
+	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
+	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+		tp->prior_ssthresh = tcp_current_ssthresh(sk);
+		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+		tcp_ca_event(sk, CA_EVENT_LOSS);
 	}
 	tp->snd_cwnd = 1;
 	tp->snd_cwnd_cnt = 0;
@@ -1255,7 +1264,7 @@ void tcp_enter_loss(struct sock *sk, int how)
 
 	tp->reordering = min_t(unsigned int, tp->reordering,
 			       sysctl_tcp_reordering);
-	tcp_set_ca_state(tp, TCP_CA_Loss);
+	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	TCP_ECN_queue_cwr(tp);
 }
@@ -1272,13 +1281,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
 	 */
 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
+		struct inet_connection_sock *icsk = inet_csk(sk);
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
-		inet_csk(sk)->icsk_retransmits++;
+		icsk->icsk_retransmits++;
 		tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+					  icsk->icsk_rto, TCP_RTO_MAX);
 		return 1;
 	}
 	return 0;
@@ -1431,8 +1441,9 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
  * in assumption of absent reordering, interpret this as reordering.
  * The only another reason could be bug in receiver TCP.
  */
-static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend)
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	u32 holes;
 
 	holes = max(tp->lost_out, 1U);
@@ -1440,16 +1451,17 @@ static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend)
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		tcp_update_reordering(tp, tp->packets_out+addend, 0);
+		tcp_update_reordering(sk, tp->packets_out + addend, 0);
 	}
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct tcp_sock *tp)
+static void tcp_add_reno_sack(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	tp->sacked_out++;
-	tcp_check_reno_reordering(tp, 0);
+	tcp_check_reno_reordering(sk, 0);
 	tcp_sync_left_out(tp);
 }
 
@@ -1464,7 +1476,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acke
 		else
 			tp->sacked_out -= acked-1;
 	}
-	tcp_check_reno_reordering(tp, acked);
+	tcp_check_reno_reordering(sk, acked);
 	tcp_sync_left_out(tp);
 }
 
@@ -1538,14 +1550,16 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 }
 
 /* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct tcp_sock *tp)
+static void tcp_cwnd_down(struct sock *sk)
 {
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
 	tp->snd_cwnd_cnt = decr&1;
 	decr >>= 1;
 
-	if (decr && tp->snd_cwnd > tp->ca_ops->min_cwnd(tp))
+	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
 		tp->snd_cwnd -= decr;
 
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
@@ -1579,11 +1593,15 @@ static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct tcp_sock *tp, int undo)
+static void tcp_undo_cwr(struct sock *sk, const int undo)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->prior_ssthresh) {
-		if (tp->ca_ops->undo_cwnd)
-			tp->snd_cwnd = tp->ca_ops->undo_cwnd(tp);
+		const struct inet_connection_sock *icsk = inet_csk(sk);
+
+		if (icsk->icsk_ca_ops->undo_cwnd)
+			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
 		else
 			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
 
@@ -1611,9 +1629,9 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 	/* Happy end! We did not retransmit anything
 	 * or our original transmission succeeded.
 	 */
-	DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
-	tcp_undo_cwr(tp, 1);
-	if (tp->ca_state == TCP_CA_Loss)
+	DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+	tcp_undo_cwr(sk, 1);
+	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
 	else
 		NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
@@ -1626,7 +1644,7 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 		tcp_moderate_cwnd(tp);
 		return 1;
 	}
-	tcp_set_ca_state(tp, TCP_CA_Open);
+	tcp_set_ca_state(sk, TCP_CA_Open);
 	return 0;
 }
 
@@ -1635,7 +1653,7 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
 {
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, tp, "D-SACK");
-		tcp_undo_cwr(tp, 1);
+		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
 	}
@@ -1656,10 +1674,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 		if (tp->retrans_out == 0)
 			tp->retrans_stamp = 0;
 
-		tcp_update_reordering(tp, tcp_fackets_out(tp)+acked, 1);
+		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
 		DBGUNDO(sk, tp, "Hoe");
-		tcp_undo_cwr(tp, 0);
+		tcp_undo_cwr(sk, 0);
 		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
@@ -1682,22 +1700,23 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 		DBGUNDO(sk, tp, "partial loss");
 		tp->lost_out = 0;
 		tp->left_out = tp->sacked_out;
-		tcp_undo_cwr(tp, 1);
+		tcp_undo_cwr(sk, 1);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (!IsReno(tp))
-			tcp_set_ca_state(tp, TCP_CA_Open);
+			tcp_set_ca_state(sk, TCP_CA_Open);
 		return 1;
 	}
 	return 0;
 }
 
-static inline void tcp_complete_cwr(struct tcp_sock *tp)
+static inline void tcp_complete_cwr(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
-	tcp_ca_event(tp, CA_EVENT_COMPLETE_CWR);
+	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
 static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
@@ -1708,21 +1727,21 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
 		tp->retrans_stamp = 0;
 
 	if (flag&FLAG_ECE)
-		tcp_enter_cwr(tp);
+		tcp_enter_cwr(sk);
 
-	if (tp->ca_state != TCP_CA_CWR) {
+	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		int state = TCP_CA_Open;
 
 		if (tp->left_out || tp->retrans_out || tp->undo_marker)
 			state = TCP_CA_Disorder;
 
-		if (tp->ca_state != state) {
-			tcp_set_ca_state(tp, state);
+		if (inet_csk(sk)->icsk_ca_state != state) {
+			tcp_set_ca_state(sk, state);
 			tp->high_seq = tp->snd_nxt;
 		}
 		tcp_moderate_cwnd(tp);
 	} else {
-		tcp_cwnd_down(tp);
+		tcp_cwnd_down(sk);
 	}
 }
 
@@ -1741,6 +1760,7 @@ static void
 tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		      int prior_packets, int flag)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
 
@@ -1764,7 +1784,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	/* C. Process data loss notification, provided it is valid. */
 	if ((flag&FLAG_DATA_LOST) &&
 	    before(tp->snd_una, tp->high_seq) &&
-	    tp->ca_state != TCP_CA_Open &&
+	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
@@ -1775,14 +1795,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 
 	/* E. Check state exit conditions. State can be terminated
 	 *    when high_seq is ACKed. */
-	if (tp->ca_state == TCP_CA_Open) {
+	if (icsk->icsk_ca_state == TCP_CA_Open) {
 		if (!sysctl_tcp_frto)
 			BUG_TRAP(tp->retrans_out == 0);
 		tp->retrans_stamp = 0;
 	} else if (!before(tp->snd_una, tp->high_seq)) {
-		switch (tp->ca_state) {
+		switch (icsk->icsk_ca_state) {
 		case TCP_CA_Loss:
-			inet_csk(sk)->icsk_retransmits = 0;
+			icsk->icsk_retransmits = 0;
 			if (tcp_try_undo_recovery(sk, tp))
 				return;
 			break;
@@ -1791,8 +1811,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			/* CWR is to be held something *above* high_seq
 			 * is ACKed for CWR bit to reach receiver. */
 			if (tp->snd_una != tp->high_seq) {
-				tcp_complete_cwr(tp);
-				tcp_set_ca_state(tp, TCP_CA_Open);
+				tcp_complete_cwr(sk);
+				tcp_set_ca_state(sk, TCP_CA_Open);
 			}
 			break;
 
@@ -1803,7 +1823,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			     * catching for all duplicate ACKs. */
 			    IsReno(tp) || tp->snd_una != tp->high_seq) {
 				tp->undo_marker = 0;
-				tcp_set_ca_state(tp, TCP_CA_Open);
+				tcp_set_ca_state(sk, TCP_CA_Open);
 			}
 			break;
 
@@ -1812,17 +1832,17 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			tcp_reset_reno_sack(tp);
 			if (tcp_try_undo_recovery(sk, tp))
 				return;
-			tcp_complete_cwr(tp);
+			tcp_complete_cwr(sk);
 			break;
 		}
 	}
 
 	/* F. Process state. */
-	switch (tp->ca_state) {
+	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (prior_snd_una == tp->snd_una) {
 			if (IsReno(tp) && is_dupack)
-				tcp_add_reno_sack(tp);
+				tcp_add_reno_sack(sk);
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
@@ -1832,13 +1852,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
-			inet_csk(sk)->icsk_retransmits = 0;
+			icsk->icsk_retransmits = 0;
 		if (!tcp_try_undo_loss(sk, tp)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
 			return;
 		}
-		if (tp->ca_state != TCP_CA_Open)
+		if (icsk->icsk_ca_state != TCP_CA_Open)
 			return;
 		/* Loss is undone; fall through to processing in Open state. */
 	default:
@@ -1846,10 +1866,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 			if (tp->snd_una != prior_snd_una)
 				tcp_reset_reno_sack(tp);
 			if (is_dupack)
-				tcp_add_reno_sack(tp);
+				tcp_add_reno_sack(sk);
 		}
 
-		if (tp->ca_state == TCP_CA_Disorder)
+		if (icsk->icsk_ca_state == TCP_CA_Disorder)
 			tcp_try_undo_dsack(sk, tp);
 
 		if (!tcp_time_to_recover(sk, tp)) {
@@ -1869,20 +1889,20 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tp->undo_marker = tp->snd_una;
 		tp->undo_retrans = tp->retrans_out;
 
-		if (tp->ca_state < TCP_CA_CWR) {
+		if (icsk->icsk_ca_state < TCP_CA_CWR) {
 			if (!(flag&FLAG_ECE))
-				tp->prior_ssthresh = tcp_current_ssthresh(tp);
-			tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
+				tp->prior_ssthresh = tcp_current_ssthresh(sk);
+			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 			TCP_ECN_queue_cwr(tp);
 		}
 
 		tp->snd_cwnd_cnt = 0;
-		tcp_set_ca_state(tp, TCP_CA_Recovery);
+		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
 	if (is_dupack || tcp_head_timedout(sk, tp))
 		tcp_update_scoreboard(sk, tp);
-	tcp_cwnd_down(tp);
+	tcp_cwnd_down(sk);
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -1908,7 +1928,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
 	 */
 	struct tcp_sock *tp = tcp_sk(sk);
 	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-	tcp_rtt_estimator(tp, seq_rtt, usrtt);
+	tcp_rtt_estimator(sk, seq_rtt, usrtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 	tcp_bound_rto(sk);
@@ -1928,7 +1948,7 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag
1928 if (flag & FLAG_RETRANS_DATA_ACKED) 1948 if (flag & FLAG_RETRANS_DATA_ACKED)
1929 return; 1949 return;
1930 1950
1931 tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt); 1951 tcp_rtt_estimator(sk, seq_rtt, usrtt);
1932 tcp_set_rto(sk); 1952 tcp_set_rto(sk);
1933 inet_csk(sk)->icsk_backoff = 0; 1953 inet_csk(sk)->icsk_backoff = 0;
1934 tcp_bound_rto(sk); 1954 tcp_bound_rto(sk);
@@ -1945,11 +1965,12 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
1945 tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag); 1965 tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
1946} 1966}
1947 1967
1948static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 1968static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
1949 u32 in_flight, int good) 1969 u32 in_flight, int good)
1950{ 1970{
1951 tp->ca_ops->cong_avoid(tp, ack, rtt, in_flight, good); 1971 const struct inet_connection_sock *icsk = inet_csk(sk);
1952 tp->snd_cwnd_stamp = tcp_time_stamp; 1972 icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
1973 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
1953} 1974}
1954 1975
1955/* Restart timer after forward progress on connection. 1976/* Restart timer after forward progress on connection.
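
The new tcp_cong_avoid() above dispatches through icsk->icsk_ca_ops, so every congestion hook must now be sk-based. Roughly, the ops table this patch implies looks like the sketch below; the sk-taking signatures are what the hunks show, while the exact member list and ordering are assumptions, not quoted from include/net/tcp.h.

    typedef unsigned int  u32;
    typedef unsigned char u8;
    struct sock;
    struct sk_buff;
    enum tcp_ca_event_sketch { CA_EVENT_TX_START_SKETCH };

    /* Reconstructed shape of the ops table after this patch. */
    struct tcp_congestion_ops_sketch {
            void (*init)(struct sock *sk);
            u32  (*ssthresh)(struct sock *sk);
            void (*cong_avoid)(struct sock *sk, u32 ack, u32 rtt,
                               u32 in_flight, int good);
            void (*set_state)(struct sock *sk, u8 new_state);
            void (*cwnd_event)(struct sock *sk, enum tcp_ca_event_sketch ev);
            void (*rtt_sample)(struct sock *sk, u32 usrtt);
            void (*pkts_acked)(struct sock *sk, u32 num_acked);
            void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
    };
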
@@ -2098,11 +2119,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2098 } 2119 }
2099 2120
2100 if (acked&FLAG_ACKED) { 2121 if (acked&FLAG_ACKED) {
2122 const struct inet_connection_sock *icsk = inet_csk(sk);
2101 tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt); 2123 tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
2102 tcp_ack_packets_out(sk, tp); 2124 tcp_ack_packets_out(sk, tp);
2103 2125
2104 if (tp->ca_ops->pkts_acked) 2126 if (icsk->icsk_ca_ops->pkts_acked)
2105 tp->ca_ops->pkts_acked(tp, pkts_acked); 2127 icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
2106 } 2128 }
2107 2129
2108#if FASTRETRANS_DEBUG > 0 2130#if FASTRETRANS_DEBUG > 0
@@ -2110,19 +2132,20 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
2110 BUG_TRAP((int)tp->lost_out >= 0); 2132 BUG_TRAP((int)tp->lost_out >= 0);
2111 BUG_TRAP((int)tp->retrans_out >= 0); 2133 BUG_TRAP((int)tp->retrans_out >= 0);
2112 if (!tp->packets_out && tp->rx_opt.sack_ok) { 2134 if (!tp->packets_out && tp->rx_opt.sack_ok) {
2135 const struct inet_connection_sock *icsk = inet_csk(sk);
2113 if (tp->lost_out) { 2136 if (tp->lost_out) {
2114 printk(KERN_DEBUG "Leak l=%u %d\n", 2137 printk(KERN_DEBUG "Leak l=%u %d\n",
2115 tp->lost_out, tp->ca_state); 2138 tp->lost_out, icsk->icsk_ca_state);
2116 tp->lost_out = 0; 2139 tp->lost_out = 0;
2117 } 2140 }
2118 if (tp->sacked_out) { 2141 if (tp->sacked_out) {
2119 printk(KERN_DEBUG "Leak s=%u %d\n", 2142 printk(KERN_DEBUG "Leak s=%u %d\n",
2120 tp->sacked_out, tp->ca_state); 2143 tp->sacked_out, icsk->icsk_ca_state);
2121 tp->sacked_out = 0; 2144 tp->sacked_out = 0;
2122 } 2145 }
2123 if (tp->retrans_out) { 2146 if (tp->retrans_out) {
2124 printk(KERN_DEBUG "Leak r=%u %d\n", 2147 printk(KERN_DEBUG "Leak r=%u %d\n",
2125 tp->retrans_out, tp->ca_state); 2148 tp->retrans_out, icsk->icsk_ca_state);
2126 tp->retrans_out = 0; 2149 tp->retrans_out = 0;
2127 } 2150 }
2128 } 2151 }
@@ -2152,16 +2175,17 @@ static void tcp_ack_probe(struct sock *sk)
2152 } 2175 }
2153} 2176}
2154 2177
2155static inline int tcp_ack_is_dubious(struct tcp_sock *tp, int flag) 2178static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
2156{ 2179{
2157 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 2180 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
2158 tp->ca_state != TCP_CA_Open); 2181 inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
2159} 2182}
2160 2183
2161static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag) 2184static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
2162{ 2185{
2186 const struct tcp_sock *tp = tcp_sk(sk);
2163 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 2187 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
2164 !((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR)); 2188 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
2165} 2189}
2166 2190
2167/* Check that window update is acceptable. 2191/* Check that window update is acceptable.
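
The rewritten tcp_may_raise_cwnd() keeps the one-hot trick from the old code: TCPF_CA_* are bit masks built from the TCP_CA_* enum, so testing membership in {Recovery, CWR} is a single shift and AND. A self-contained restatement (the enum order matches the kernel's):

    #include <assert.h>

    enum { CA_Open, CA_Disorder, CA_CWR, CA_Recovery, CA_Loss };
    #define F_CWR      (1 << CA_CWR)
    #define F_Recovery (1 << CA_Recovery)

    /* Mirrors (1 << icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR):
     * true while the sender is reducing its window.
     */
    static int in_cwnd_reduction(unsigned char ca_state)
    {
            return (1 << ca_state) & (F_Recovery | F_CWR);
    }

    int main(void)
    {
            assert(!in_cwnd_reduction(CA_Open));
            assert(in_cwnd_reduction(CA_CWR));
            assert(in_cwnd_reduction(CA_Recovery));
            return 0;
    }
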
@@ -2251,6 +2275,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2251/* This routine deals with incoming acks, but not outgoing ones. */ 2275/* This routine deals with incoming acks, but not outgoing ones. */
2252static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 2276static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2253{ 2277{
2278 struct inet_connection_sock *icsk = inet_csk(sk);
2254 struct tcp_sock *tp = tcp_sk(sk); 2279 struct tcp_sock *tp = tcp_sk(sk);
2255 u32 prior_snd_una = tp->snd_una; 2280 u32 prior_snd_una = tp->snd_una;
2256 u32 ack_seq = TCP_SKB_CB(skb)->seq; 2281 u32 ack_seq = TCP_SKB_CB(skb)->seq;
@@ -2278,7 +2303,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2278 tp->snd_una = ack; 2303 tp->snd_una = ack;
2279 flag |= FLAG_WIN_UPDATE; 2304 flag |= FLAG_WIN_UPDATE;
2280 2305
2281 tcp_ca_event(tp, CA_EVENT_FAST_ACK); 2306 tcp_ca_event(sk, CA_EVENT_FAST_ACK);
2282 2307
2283 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); 2308 NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
2284 } else { 2309 } else {
@@ -2295,7 +2320,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2295 if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) 2320 if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
2296 flag |= FLAG_ECE; 2321 flag |= FLAG_ECE;
2297 2322
2298 tcp_ca_event(tp, CA_EVENT_SLOW_ACK); 2323 tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
2299 } 2324 }
2300 2325
2301 /* We passed data and got it acked, remove any soft error 2326 /* We passed data and got it acked, remove any soft error
@@ -2311,19 +2336,19 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2311 2336
2312 /* See if we can take anything off of the retransmit queue. */ 2337 /* See if we can take anything off of the retransmit queue. */
2313 flag |= tcp_clean_rtx_queue(sk, &seq_rtt, 2338 flag |= tcp_clean_rtx_queue(sk, &seq_rtt,
2314 tp->ca_ops->rtt_sample ? &seq_usrtt : NULL); 2339 icsk->icsk_ca_ops->rtt_sample ? &seq_usrtt : NULL);
2315 2340
2316 if (tp->frto_counter) 2341 if (tp->frto_counter)
2317 tcp_process_frto(sk, prior_snd_una); 2342 tcp_process_frto(sk, prior_snd_una);
2318 2343
2319 if (tcp_ack_is_dubious(tp, flag)) { 2344 if (tcp_ack_is_dubious(sk, flag)) {
2320 /* Advance CWND, if state allows this. */ 2345 /* Advance CWND, if state allows this. */

2321 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(tp, flag)) 2346 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
2322 tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 0); 2347 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
2323 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); 2348 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
2324 } else { 2349 } else {
2325 if ((flag & FLAG_DATA_ACKED)) 2350 if ((flag & FLAG_DATA_ACKED))
2326 tcp_cong_avoid(tp, ack, seq_rtt, prior_in_flight, 1); 2351 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
2327 } 2352 }
2328 2353
2329 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) 2354 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
@@ -2332,7 +2357,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2332 return 1; 2357 return 1;
2333 2358
2334no_queue: 2359no_queue:
2335 tp->probes_out = 0; 2360 icsk->icsk_probes_out = 0;
2336 2361
2337 /* If this ack opens up a zero window, clear backoff. It was 2362 /* If this ack opens up a zero window, clear backoff. It was
2338 * being used to time the probes, and is probably far higher than 2363 * being used to time the probes, and is probably far higher than
@@ -3301,12 +3326,12 @@ void tcp_cwnd_application_limited(struct sock *sk)
3301{ 3326{
3302 struct tcp_sock *tp = tcp_sk(sk); 3327 struct tcp_sock *tp = tcp_sk(sk);
3303 3328
3304 if (tp->ca_state == TCP_CA_Open && 3329 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
3305 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 3330 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
3306 /* Limited by application or receiver window. */ 3331 /* Limited by application or receiver window. */
3307 u32 win_used = max(tp->snd_cwnd_used, 2U); 3332 u32 win_used = max(tp->snd_cwnd_used, 2U);
3308 if (win_used < tp->snd_cwnd) { 3333 if (win_used < tp->snd_cwnd) {
3309 tp->snd_ssthresh = tcp_current_ssthresh(tp); 3334 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3310 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 3335 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
3311 } 3336 }
3312 tp->snd_cwnd_used = 0; 3337 tp->snd_cwnd_used = 0;
@@ -3935,7 +3960,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3935 3960
3936 tcp_init_metrics(sk); 3961 tcp_init_metrics(sk);
3937 3962
3938 tcp_init_congestion_control(tp); 3963 tcp_init_congestion_control(sk);
3939 3964
3940 /* Prevent spurious tcp_cwnd_restart() on first data 3965 /* Prevent spurious tcp_cwnd_restart() on first data
3941 * packet. 3966 * packet.
@@ -4212,7 +4237,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
4212 4237
4213 tcp_init_metrics(sk); 4238 tcp_init_metrics(sk);
4214 4239
4215 tcp_init_congestion_control(tp); 4240 tcp_init_congestion_control(sk);
4216 4241
4217 /* Prevent spurious tcp_cwnd_restart() on 4242 /* Prevent spurious tcp_cwnd_restart() on
4218 * first data packet. 4243 * first data packet.
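
All the tcp_set_ca_state(sk, ...) call sites in this file presume a helper that both notifies the active algorithm and records the new state in the icsk. Below is a compilable sketch of that assumed shape, with stand-in types; the real helper lives in include/net/tcp.h.

    typedef unsigned char u8;
    struct sock;

    struct ca_ops_sketch { void (*set_state)(struct sock *sk, u8 state); };
    struct icsk_sketch   { struct ca_ops_sketch *ca_ops; u8 ca_state; };

    /* Let the congestion module observe the transition, then record it. */
    static void set_ca_state_sketch(struct icsk_sketch *icsk,
                                    struct sock *sk, u8 ca_state)
    {
            if (icsk->ca_ops->set_state)
                    icsk->ca_ops->set_state(sk, ca_state);
            icsk->ca_state = ca_state;
    }
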
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 32a0ebc589d5..97bbf595230d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1409,13 +1409,14 @@ struct tcp_func ipv4_specific = {
1409 */ 1409 */
1410static int tcp_v4_init_sock(struct sock *sk) 1410static int tcp_v4_init_sock(struct sock *sk)
1411{ 1411{
1412 struct inet_connection_sock *icsk = inet_csk(sk);
1412 struct tcp_sock *tp = tcp_sk(sk); 1413 struct tcp_sock *tp = tcp_sk(sk);
1413 1414
1414 skb_queue_head_init(&tp->out_of_order_queue); 1415 skb_queue_head_init(&tp->out_of_order_queue);
1415 tcp_init_xmit_timers(sk); 1416 tcp_init_xmit_timers(sk);
1416 tcp_prequeue_init(tp); 1417 tcp_prequeue_init(tp);
1417 1418
1418 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 1419 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1419 tp->mdev = TCP_TIMEOUT_INIT; 1420 tp->mdev = TCP_TIMEOUT_INIT;
1420 1421
1421 /* So many TCP implementations out there (incorrectly) count the 1422 /* So many TCP implementations out there (incorrectly) count the
@@ -1433,7 +1434,7 @@ static int tcp_v4_init_sock(struct sock *sk)
1433 tp->mss_cache = 536; 1434 tp->mss_cache = 536;
1434 1435
1435 tp->reordering = sysctl_tcp_reordering; 1436 tp->reordering = sysctl_tcp_reordering;
1436 tp->ca_ops = &tcp_init_congestion_ops; 1437 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1437 1438
1438 sk->sk_state = TCP_CLOSE; 1439 sk->sk_state = TCP_CLOSE;
1439 1440
@@ -1456,7 +1457,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
1456 1457
1457 tcp_clear_xmit_timers(sk); 1458 tcp_clear_xmit_timers(sk);
1458 1459
1459 tcp_cleanup_congestion_control(tp); 1460 tcp_cleanup_congestion_control(sk);
1460 1461
1461 /* Cleanup up the write buffer. */ 1462 /* Cleanup up the write buffer. */
1462 sk_stream_writequeue_purge(sk); 1463 sk_stream_writequeue_purge(sk);
@@ -1883,7 +1884,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
1883 jiffies_to_clock_t(timer_expires - jiffies), 1884 jiffies_to_clock_t(timer_expires - jiffies),
1884 icsk->icsk_retransmits, 1885 icsk->icsk_retransmits,
1885 sock_i_uid(sp), 1886 sock_i_uid(sp),
1886 tp->probes_out, 1887 icsk->icsk_probes_out,
1887 sock_i_ino(sp), 1888 sock_i_ino(sp),
1888 atomic_read(&sp->sk_refcnt), sp, 1889 atomic_read(&sp->sk_refcnt), sp,
1889 icsk->icsk_rto, 1890 icsk->icsk_rto,
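
Two ends of the congestion-ops lifetime meet in this file: tcp_v4_init_sock() parks a fresh socket on the built-in tcp_init_congestion_ops placeholder (the real algorithm is chosen once the connection is established), and tcp_v4_destroy_sock() goes through tcp_cleanup_congestion_control() so a modular algorithm can drop its reference. A toy restatement of that handover, with invented names standing in for the real fields:

    /* ca_algo is an invented stand-in, not a kernel type; module_refcnt
     * models the module_get()/module_put() pairing for loadable
     * algorithms.
     */
    struct ca_algo {
            const char *name;
            int *module_refcnt;     /* 0 for built-in algorithms */
    };

    static struct ca_algo ca_placeholder = { "init-placeholder", 0 };

    static void ca_cleanup_sketch(struct ca_algo **slot)
    {
            if ((*slot)->module_refcnt)
                    (*(*slot)->module_refcnt)--;    /* module_put() */
            *slot = &ca_placeholder;
    }
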
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index dc085233d512..a88db28b0af7 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -384,9 +384,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
384 newtp->frto_counter = 0; 384 newtp->frto_counter = 0;
385 newtp->frto_highmark = 0; 385 newtp->frto_highmark = 0;
386 386
387 newtp->ca_ops = &tcp_reno; 387 newicsk->icsk_ca_ops = &tcp_reno;
388 388
389 tcp_set_ca_state(newtp, TCP_CA_Open); 389 tcp_set_ca_state(newsk, TCP_CA_Open);
390 tcp_init_xmit_timers(newsk); 390 tcp_init_xmit_timers(newsk);
391 skb_queue_head_init(&newtp->out_of_order_queue); 391 skb_queue_head_init(&newtp->out_of_order_queue);
392 newtp->rcv_wup = treq->rcv_isn + 1; 392 newtp->rcv_wup = treq->rcv_isn + 1;
@@ -399,7 +399,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
399 newtp->rx_opt.dsack = 0; 399 newtp->rx_opt.dsack = 0;
400 newtp->rx_opt.eff_sacks = 0; 400 newtp->rx_opt.eff_sacks = 0;
401 401
402 newtp->probes_out = 0;
403 newtp->rx_opt.num_sacks = 0; 402 newtp->rx_opt.num_sacks = 0;
404 newtp->urg_data = 0; 403 newtp->urg_data = 0;
405 404
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f458eacb5ef2..267b0fcbfc9c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -112,9 +112,9 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
112 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 112 u32 restart_cwnd = tcp_init_cwnd(tp, dst);
113 u32 cwnd = tp->snd_cwnd; 113 u32 cwnd = tp->snd_cwnd;
114 114
115 tcp_ca_event(tp, CA_EVENT_CWND_RESTART); 115 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
116 116
117 tp->snd_ssthresh = tcp_current_ssthresh(tp); 117 tp->snd_ssthresh = tcp_current_ssthresh(sk);
118 restart_cwnd = min(restart_cwnd, cwnd); 118 restart_cwnd = min(restart_cwnd, cwnd);
119 119
120 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 120 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
@@ -265,6 +265,7 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
265static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) 265static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
266{ 266{
267 if (skb != NULL) { 267 if (skb != NULL) {
268 const struct inet_connection_sock *icsk = inet_csk(sk);
268 struct inet_sock *inet = inet_sk(sk); 269 struct inet_sock *inet = inet_sk(sk);
269 struct tcp_sock *tp = tcp_sk(sk); 270 struct tcp_sock *tp = tcp_sk(sk);
270 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 271 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -280,7 +281,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
280#define SYSCTL_FLAG_SACK 0x4 281#define SYSCTL_FLAG_SACK 0x4
281 282
282 /* If congestion control is doing timestamping */ 283 /* If congestion control is doing timestamping */
283 if (tp->ca_ops->rtt_sample) 284 if (icsk->icsk_ca_ops->rtt_sample)
284 do_gettimeofday(&skb->stamp); 285 do_gettimeofday(&skb->stamp);
285 286
286 sysctl_flags = 0; 287 sysctl_flags = 0;
@@ -308,7 +309,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
308 } 309 }
309 310
310 if (tcp_packets_in_flight(tp) == 0) 311 if (tcp_packets_in_flight(tp) == 0)
311 tcp_ca_event(tp, CA_EVENT_TX_START); 312 tcp_ca_event(sk, CA_EVENT_TX_START);
312 313
313 th = (struct tcphdr *) skb_push(skb, tcp_header_size); 314 th = (struct tcphdr *) skb_push(skb, tcp_header_size);
314 skb->h.th = th; 315 skb->h.th = th;
@@ -366,7 +367,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
366 if (err <= 0) 367 if (err <= 0)
367 return err; 368 return err;
368 369
369 tcp_enter_cwr(tp); 370 tcp_enter_cwr(sk);
370 371
371 /* NET_XMIT_CN is special. It does not guarantee, 372 /* NET_XMIT_CN is special. It does not guarantee,
372 * that this packet is lost. It tells that device 373 * that this packet is lost. It tells that device
@@ -905,12 +906,13 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
905 */ 906 */
906static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) 907static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
907{ 908{
909 const struct inet_connection_sock *icsk = inet_csk(sk);
908 u32 send_win, cong_win, limit, in_flight; 910 u32 send_win, cong_win, limit, in_flight;
909 911
910 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 912 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
911 return 0; 913 return 0;
912 914
913 if (tp->ca_state != TCP_CA_Open) 915 if (icsk->icsk_ca_state != TCP_CA_Open)
914 return 0; 916 return 0;
915 917
916 in_flight = tcp_packets_in_flight(tp); 918 in_flight = tcp_packets_in_flight(tp);
@@ -1287,6 +1289,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1287 */ 1289 */
1288void tcp_simple_retransmit(struct sock *sk) 1290void tcp_simple_retransmit(struct sock *sk)
1289{ 1291{
1292 const struct inet_connection_sock *icsk = inet_csk(sk);
1290 struct tcp_sock *tp = tcp_sk(sk); 1293 struct tcp_sock *tp = tcp_sk(sk);
1291 struct sk_buff *skb; 1294 struct sk_buff *skb;
1292 unsigned int mss = tcp_current_mss(sk, 0); 1295 unsigned int mss = tcp_current_mss(sk, 0);
@@ -1317,12 +1320,12 @@ void tcp_simple_retransmit(struct sock *sk)
1317 * in network, but units changed and effective 1320 * in network, but units changed and effective
1318 * cwnd/ssthresh really reduced now. 1321 * cwnd/ssthresh really reduced now.
1319 */ 1322 */
1320 if (tp->ca_state != TCP_CA_Loss) { 1323 if (icsk->icsk_ca_state != TCP_CA_Loss) {
1321 tp->high_seq = tp->snd_nxt; 1324 tp->high_seq = tp->snd_nxt;
1322 tp->snd_ssthresh = tcp_current_ssthresh(tp); 1325 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1323 tp->prior_ssthresh = 0; 1326 tp->prior_ssthresh = 0;
1324 tp->undo_marker = 0; 1327 tp->undo_marker = 0;
1325 tcp_set_ca_state(tp, TCP_CA_Loss); 1328 tcp_set_ca_state(sk, TCP_CA_Loss);
1326 } 1329 }
1327 tcp_xmit_retransmit_queue(sk); 1330 tcp_xmit_retransmit_queue(sk);
1328} 1331}
@@ -1462,6 +1465,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1462 */ 1465 */
1463void tcp_xmit_retransmit_queue(struct sock *sk) 1466void tcp_xmit_retransmit_queue(struct sock *sk)
1464{ 1467{
1468 const struct inet_connection_sock *icsk = inet_csk(sk);
1465 struct tcp_sock *tp = tcp_sk(sk); 1469 struct tcp_sock *tp = tcp_sk(sk);
1466 struct sk_buff *skb; 1470 struct sk_buff *skb;
1467 int packet_cnt = tp->lost_out; 1471 int packet_cnt = tp->lost_out;
@@ -1485,7 +1489,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1485 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 1489 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1486 if (tcp_retransmit_skb(sk, skb)) 1490 if (tcp_retransmit_skb(sk, skb))
1487 return; 1491 return;
1488 if (tp->ca_state != TCP_CA_Loss) 1492 if (icsk->icsk_ca_state != TCP_CA_Loss)
1489 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 1493 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1490 else 1494 else
1491 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 1495 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
@@ -1507,7 +1511,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
1507 /* OK, demanded retransmission is finished. */ 1511 /* OK, demanded retransmission is finished. */
1508 1512
1509 /* Forward retransmissions are possible only during Recovery. */ 1513 /* Forward retransmissions are possible only during Recovery. */
1510 if (tp->ca_state != TCP_CA_Recovery) 1514 if (icsk->icsk_ca_state != TCP_CA_Recovery)
1511 return; 1515 return;
1512 1516
1513 /* No forward retransmissions in Reno are possible. */ 1517 /* No forward retransmissions in Reno are possible. */
@@ -2028,7 +2032,7 @@ void tcp_send_probe0(struct sock *sk)
2028 2032
2029 if (tp->packets_out || !sk->sk_send_head) { 2033 if (tp->packets_out || !sk->sk_send_head) {
2030 /* Cancel probe timer, if it is not required. */ 2034 /* Cancel probe timer, if it is not required. */
2031 tp->probes_out = 0; 2035 icsk->icsk_probes_out = 0;
2032 icsk->icsk_backoff = 0; 2036 icsk->icsk_backoff = 0;
2033 return; 2037 return;
2034 } 2038 }
@@ -2036,19 +2040,19 @@ void tcp_send_probe0(struct sock *sk)
2036 if (err <= 0) { 2040 if (err <= 0) {
2037 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2041 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2038 icsk->icsk_backoff++; 2042 icsk->icsk_backoff++;
2039 tp->probes_out++; 2043 icsk->icsk_probes_out++;
2040 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2044 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2041 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2045 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2042 TCP_RTO_MAX); 2046 TCP_RTO_MAX);
2043 } else { 2047 } else {
 2044 /* If the packet was not sent due to local congestion, 2048 /* If the packet was not sent due to local congestion,
 2045 * do not back off and do not remember probes_out. 2049 * do not back off and do not remember icsk_probes_out.
 2046 * Let local senders fight for local resources. 2050 * Let local senders fight for local resources.
 2047 * 2051 *
 2048 * Still use the accumulated backoff, though. 2052 * Still use the accumulated backoff, though.
2049 */ 2053 */
2050 if (!tp->probes_out) 2054 if (!icsk->icsk_probes_out)
2051 tp->probes_out=1; 2055 icsk->icsk_probes_out = 1;
2052 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2056 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2053 min(icsk->icsk_rto << icsk->icsk_backoff, 2057 min(icsk->icsk_rto << icsk->icsk_backoff,
2054 TCP_RESOURCE_PROBE_INTERVAL), 2058 TCP_RESOURCE_PROBE_INTERVAL),
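
For the zero-window probe path above, the interval is icsk_rto << icsk_backoff clamped to TCP_RTO_MAX, so probes space out exponentially and then saturate. A standalone illustration of the resulting schedule; the 200 ms starting RTO and tick rate are example inputs, not values from this patch:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long hz = 1000;          /* assumed tick rate */
            const unsigned long rto = hz / 5;       /* 200 ms initial RTO */
            const unsigned long rto_max = 120 * hz; /* TCP_RTO_MAX */

            for (unsigned int backoff = 0; backoff < 12; backoff++) {
                    unsigned long when = rto << backoff;
                    if (when > rto_max)
                            when = rto_max;         /* clamp, as min() does */
                    printf("probe %2u: %6lu ms\n", backoff, when);
            }
            return 0;
    }
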
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 70e108e15c71..327770bf5522 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -16,9 +16,10 @@
16#define TCP_SCALABLE_AI_CNT 50U 16#define TCP_SCALABLE_AI_CNT 50U
17#define TCP_SCALABLE_MD_SCALE 3 17#define TCP_SCALABLE_MD_SCALE 3
18 18
19static void tcp_scalable_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, 19static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
20 u32 in_flight, int flag) 20 u32 in_flight, int flag)
21{ 21{
22 struct tcp_sock *tp = tcp_sk(sk);
22 if (in_flight < tp->snd_cwnd) 23 if (in_flight < tp->snd_cwnd)
23 return; 24 return;
24 25
@@ -35,8 +36,9 @@ static void tcp_scalable_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
35 tp->snd_cwnd_stamp = tcp_time_stamp; 36 tp->snd_cwnd_stamp = tcp_time_stamp;
36} 37}
37 38
38static u32 tcp_scalable_ssthresh(struct tcp_sock *tp) 39static u32 tcp_scalable_ssthresh(struct sock *sk)
39{ 40{
41 const struct tcp_sock *tp = tcp_sk(sk);
40 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); 42 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
41} 43}
42 44
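
The ssthresh hook above is Scalable TCP's fixed multiplicative decrease: cwnd shrinks by cwnd >> TCP_SCALABLE_MD_SCALE, i.e. one eighth, with a floor of two segments. Restated and checked standalone:

    #include <assert.h>

    static unsigned int scalable_ssthresh(unsigned int cwnd)
    {
            unsigned int w = cwnd - (cwnd >> 3);    /* 12.5% reduction */
            return w > 2 ? w : 2;                   /* never below 2 */
    }

    int main(void)
    {
            assert(scalable_ssthresh(100) == 88);   /* 100 - 12 */
            assert(scalable_ssthresh(2) == 2);      /* floor holds */
            return 0;
    }
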
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 72cec6981830..415ee47ac1c5 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -233,11 +233,12 @@ out_unlock:
233 233
234static void tcp_probe_timer(struct sock *sk) 234static void tcp_probe_timer(struct sock *sk)
235{ 235{
236 struct inet_connection_sock *icsk = inet_csk(sk);
236 struct tcp_sock *tp = tcp_sk(sk); 237 struct tcp_sock *tp = tcp_sk(sk);
237 int max_probes; 238 int max_probes;
238 239
239 if (tp->packets_out || !sk->sk_send_head) { 240 if (tp->packets_out || !sk->sk_send_head) {
240 tp->probes_out = 0; 241 icsk->icsk_probes_out = 0;
241 return; 242 return;
242 } 243 }
243 244
@@ -248,7 +249,7 @@ static void tcp_probe_timer(struct sock *sk)
248 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing 249 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
249 * this behaviour in Solaris down as a bug fix. [AC] 250 * this behaviour in Solaris down as a bug fix. [AC]
250 * 251 *
 251 * Let me explain. probes_out is zeroed by incoming ACKs 252 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
252 * even if they advertise zero window. Hence, connection is killed only 253 * even if they advertise zero window. Hence, connection is killed only
253 * if we received no ACKs for normal connection timeout. It is not killed 254 * if we received no ACKs for normal connection timeout. It is not killed
254 * only because window stays zero for some time, window may be zero 255 * only because window stays zero for some time, window may be zero
@@ -259,16 +260,15 @@ static void tcp_probe_timer(struct sock *sk)
259 max_probes = sysctl_tcp_retries2; 260 max_probes = sysctl_tcp_retries2;
260 261
261 if (sock_flag(sk, SOCK_DEAD)) { 262 if (sock_flag(sk, SOCK_DEAD)) {
262 const struct inet_connection_sock *icsk = inet_csk(sk);
263 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); 263 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
264 264
265 max_probes = tcp_orphan_retries(sk, alive); 265 max_probes = tcp_orphan_retries(sk, alive);
266 266
267 if (tcp_out_of_resources(sk, alive || tp->probes_out <= max_probes)) 267 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
268 return; 268 return;
269 } 269 }
270 270
271 if (tp->probes_out > max_probes) { 271 if (icsk->icsk_probes_out > max_probes) {
272 tcp_write_err(sk); 272 tcp_write_err(sk);
273 } else { 273 } else {
274 /* Only send another probe if we didn't close things up. */ 274 /* Only send another probe if we didn't close things up. */
@@ -319,19 +319,20 @@ static void tcp_retransmit_timer(struct sock *sk)
319 goto out; 319 goto out;
320 320
321 if (icsk->icsk_retransmits == 0) { 321 if (icsk->icsk_retransmits == 0) {
322 if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { 322 if (icsk->icsk_ca_state == TCP_CA_Disorder ||
323 icsk->icsk_ca_state == TCP_CA_Recovery) {
323 if (tp->rx_opt.sack_ok) { 324 if (tp->rx_opt.sack_ok) {
324 if (tp->ca_state == TCP_CA_Recovery) 325 if (icsk->icsk_ca_state == TCP_CA_Recovery)
325 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); 326 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
326 else 327 else
327 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); 328 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
328 } else { 329 } else {
329 if (tp->ca_state == TCP_CA_Recovery) 330 if (icsk->icsk_ca_state == TCP_CA_Recovery)
330 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); 331 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
331 else 332 else
332 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); 333 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
333 } 334 }
334 } else if (tp->ca_state == TCP_CA_Loss) { 335 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
335 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); 336 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
336 } else { 337 } else {
337 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); 338 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
@@ -449,6 +450,7 @@ void tcp_set_keepalive(struct sock *sk, int val)
449static void tcp_keepalive_timer (unsigned long data) 450static void tcp_keepalive_timer (unsigned long data)
450{ 451{
451 struct sock *sk = (struct sock *) data; 452 struct sock *sk = (struct sock *) data;
453 struct inet_connection_sock *icsk = inet_csk(sk);
452 struct tcp_sock *tp = tcp_sk(sk); 454 struct tcp_sock *tp = tcp_sk(sk);
453 __u32 elapsed; 455 __u32 elapsed;
454 456
@@ -490,14 +492,14 @@ static void tcp_keepalive_timer (unsigned long data)
490 elapsed = tcp_time_stamp - tp->rcv_tstamp; 492 elapsed = tcp_time_stamp - tp->rcv_tstamp;
491 493
492 if (elapsed >= keepalive_time_when(tp)) { 494 if (elapsed >= keepalive_time_when(tp)) {
493 if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) || 495 if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) ||
494 (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) { 496 (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) {
495 tcp_send_active_reset(sk, GFP_ATOMIC); 497 tcp_send_active_reset(sk, GFP_ATOMIC);
496 tcp_write_err(sk); 498 tcp_write_err(sk);
497 goto out; 499 goto out;
498 } 500 }
499 if (tcp_write_wakeup(sk) <= 0) { 501 if (tcp_write_wakeup(sk) <= 0) {
500 tp->probes_out++; 502 icsk->icsk_probes_out++;
501 elapsed = keepalive_intvl_when(tp); 503 elapsed = keepalive_intvl_when(tp);
502 } else { 504 } else {
503 /* If keepalive was lost due to local congestion, 505 /* If keepalive was lost due to local congestion,
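
The keepalive test above picks its probe limit from the per-socket setting when one exists, falling back to the sysctl otherwise. A minimal restatement of just that decision, with names invented for the sketch:

    /* keepalive_probes == 0 means "unset, use the sysctl value". */
    static int keepalive_limit_hit(unsigned int probes_out,
                                   unsigned int keepalive_probes,
                                   unsigned int sysctl_probes)
    {
            unsigned int limit = keepalive_probes ? keepalive_probes
                                                  : sysctl_probes;
            return probes_out >= limit;
    }
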
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 9bd443db5193..054de24efee2 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -82,9 +82,10 @@ struct vegas {
82 * Instead we must wait until the completion of an RTT during 82 * Instead we must wait until the completion of an RTT during
83 * which we actually receive ACKs. 83 * which we actually receive ACKs.
84 */ 84 */
85static inline void vegas_enable(struct tcp_sock *tp) 85static inline void vegas_enable(struct sock *sk)
86{ 86{
87 struct vegas *vegas = tcp_ca(tp); 87 const struct tcp_sock *tp = tcp_sk(sk);
88 struct vegas *vegas = inet_csk_ca(sk);
88 89
89 /* Begin taking Vegas samples next time we send something. */ 90 /* Begin taking Vegas samples next time we send something. */
90 vegas->doing_vegas_now = 1; 91 vegas->doing_vegas_now = 1;
@@ -97,19 +98,19 @@ static inline void vegas_enable(struct tcp_sock *tp)
97} 98}
98 99
99/* Stop taking Vegas samples for now. */ 100/* Stop taking Vegas samples for now. */
100static inline void vegas_disable(struct tcp_sock *tp) 101static inline void vegas_disable(struct sock *sk)
101{ 102{
102 struct vegas *vegas = tcp_ca(tp); 103 struct vegas *vegas = inet_csk_ca(sk);
103 104
104 vegas->doing_vegas_now = 0; 105 vegas->doing_vegas_now = 0;
105} 106}
106 107
107static void tcp_vegas_init(struct tcp_sock *tp) 108static void tcp_vegas_init(struct sock *sk)
108{ 109{
109 struct vegas *vegas = tcp_ca(tp); 110 struct vegas *vegas = inet_csk_ca(sk);
110 111
111 vegas->baseRTT = 0x7fffffff; 112 vegas->baseRTT = 0x7fffffff;
112 vegas_enable(tp); 113 vegas_enable(sk);
113} 114}
114 115
115/* Do RTT sampling needed for Vegas. 116/* Do RTT sampling needed for Vegas.
@@ -120,9 +121,9 @@ static void tcp_vegas_init(struct tcp_sock *tp)
120 * o min-filter RTT samples from a much longer window (forever for now) 121 * o min-filter RTT samples from a much longer window (forever for now)
121 * to find the propagation delay (baseRTT) 122 * to find the propagation delay (baseRTT)
122 */ 123 */
123static void tcp_vegas_rtt_calc(struct tcp_sock *tp, u32 usrtt) 124static void tcp_vegas_rtt_calc(struct sock *sk, u32 usrtt)
124{ 125{
125 struct vegas *vegas = tcp_ca(tp); 126 struct vegas *vegas = inet_csk_ca(sk);
126 u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ 127 u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */
127 128
128 /* Filter to find propagation delay: */ 129 /* Filter to find propagation delay: */
@@ -136,13 +137,13 @@ static void tcp_vegas_rtt_calc(struct tcp_sock *tp, u32 usrtt)
136 vegas->cntRTT++; 137 vegas->cntRTT++;
137} 138}
138 139
139static void tcp_vegas_state(struct tcp_sock *tp, u8 ca_state) 140static void tcp_vegas_state(struct sock *sk, u8 ca_state)
140{ 141{
141 142
142 if (ca_state == TCP_CA_Open) 143 if (ca_state == TCP_CA_Open)
143 vegas_enable(tp); 144 vegas_enable(sk);
144 else 145 else
145 vegas_disable(tp); 146 vegas_disable(sk);
146} 147}
147 148
148/* 149/*
@@ -154,20 +155,21 @@ static void tcp_vegas_state(struct tcp_sock *tp, u8 ca_state)
154 * packets, _then_ we can make Vegas calculations 155 * packets, _then_ we can make Vegas calculations
155 * again. 156 * again.
156 */ 157 */
157static void tcp_vegas_cwnd_event(struct tcp_sock *tp, enum tcp_ca_event event) 158static void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
158{ 159{
159 if (event == CA_EVENT_CWND_RESTART || 160 if (event == CA_EVENT_CWND_RESTART ||
160 event == CA_EVENT_TX_START) 161 event == CA_EVENT_TX_START)
161 tcp_vegas_init(tp); 162 tcp_vegas_init(sk);
162} 163}
163 164
164static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack, 165static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
165 u32 seq_rtt, u32 in_flight, int flag) 166 u32 seq_rtt, u32 in_flight, int flag)
166{ 167{
167 struct vegas *vegas = tcp_ca(tp); 168 struct tcp_sock *tp = tcp_sk(sk);
169 struct vegas *vegas = inet_csk_ca(sk);
168 170
169 if (!vegas->doing_vegas_now) 171 if (!vegas->doing_vegas_now)
170 return tcp_reno_cong_avoid(tp, ack, seq_rtt, in_flight, flag); 172 return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
171 173
172 /* The key players are v_beg_snd_una and v_beg_snd_nxt. 174 /* The key players are v_beg_snd_una and v_beg_snd_nxt.
173 * 175 *
@@ -219,7 +221,7 @@ static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack,
219 * but that's not too awful, since we're taking the min, 221 * but that's not too awful, since we're taking the min,
220 * rather than averaging. 222 * rather than averaging.
221 */ 223 */
222 tcp_vegas_rtt_calc(tp, seq_rtt*1000); 224 tcp_vegas_rtt_calc(sk, seq_rtt * 1000);
223 225
224 /* We do the Vegas calculations only if we got enough RTT 226 /* We do the Vegas calculations only if we got enough RTT
225 * samples that we can be reasonably sure that we got 227 * samples that we can be reasonably sure that we got
@@ -359,10 +361,10 @@ static void tcp_vegas_cong_avoid(struct tcp_sock *tp, u32 ack,
359} 361}
360 362
361/* Extract info for Tcp socket info provided via netlink. */ 363/* Extract info for Tcp socket info provided via netlink. */
362static void tcp_vegas_get_info(struct tcp_sock *tp, u32 ext, 364static void tcp_vegas_get_info(struct sock *sk, u32 ext,
363 struct sk_buff *skb) 365 struct sk_buff *skb)
364{ 366{
365 const struct vegas *ca = tcp_ca(tp); 367 const struct vegas *ca = inet_csk_ca(sk);
366 if (ext & (1<<(TCPDIAG_VEGASINFO-1))) { 368 if (ext & (1<<(TCPDIAG_VEGASINFO-1))) {
367 struct tcpvegas_info *info; 369 struct tcpvegas_info *info;
368 370
@@ -393,7 +395,7 @@ static struct tcp_congestion_ops tcp_vegas = {
393 395
394static int __init tcp_vegas_register(void) 396static int __init tcp_vegas_register(void)
395{ 397{
396 BUG_ON(sizeof(struct vegas) > TCP_CA_PRIV_SIZE); 398 BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE);
397 tcp_register_congestion_control(&tcp_vegas); 399 tcp_register_congestion_control(&tcp_vegas);
398 return 0; 400 return 0;
399} 401}
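
For reference, the sampling that tcp_vegas_rtt_calc() performs is two nested minimum filters; a standalone restatement with sketch types but the same arithmetic:

    #include <stdint.h>

    struct vegas_sketch { uint32_t baseRTT, minRTT, cntRTT; };

    /* baseRTT min-filters over the whole connection (propagation
     * delay estimate); minRTT over the current cwnd's worth of ACKs.
     * The +1 keeps a zero sample from poisoning either minimum.
     */
    static void vegas_rtt_calc_sketch(struct vegas_sketch *v, uint32_t usrtt)
    {
            uint32_t vrtt = usrtt + 1;

            if (vrtt < v->baseRTT)
                    v->baseRTT = vrtt;
            if (vrtt < v->minRTT)
                    v->minRTT = vrtt;
            v->cntRTT++;
    }
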
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index ef827242c940..d8a5a2b92e37 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -40,9 +40,9 @@ struct westwood {
40 * way as soon as possible. It will reasonably happen within the first 40 * way as soon as possible. It will reasonably happen within the first
41 * RTT period of the connection lifetime. 41 * RTT period of the connection lifetime.
42 */ 42 */
43static void tcp_westwood_init(struct tcp_sock *tp) 43static void tcp_westwood_init(struct sock *sk)
44{ 44{
45 struct westwood *w = tcp_ca(tp); 45 struct westwood *w = inet_csk_ca(sk);
46 46
47 w->bk = 0; 47 w->bk = 0;
48 w->bw_ns_est = 0; 48 w->bw_ns_est = 0;
@@ -51,7 +51,7 @@ static void tcp_westwood_init(struct tcp_sock *tp)
51 w->cumul_ack = 0; 51 w->cumul_ack = 0;
52 w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT; 52 w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
53 w->rtt_win_sx = tcp_time_stamp; 53 w->rtt_win_sx = tcp_time_stamp;
54 w->snd_una = tp->snd_una; 54 w->snd_una = tcp_sk(sk)->snd_una;
55} 55}
56 56
57/* 57/*
@@ -74,11 +74,11 @@ static inline void westwood_filter(struct westwood *w, u32 delta)
 74 * Called after processing a group of packets, 74 * Called after processing a group of packets,
 75 * but all Westwood needs is the last sample of srtt. 75 * but all Westwood needs is the last sample of srtt.
76 */ 76 */
77static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt) 77static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
78{ 78{
79 struct westwood *w = tcp_ca(tp); 79 struct westwood *w = inet_csk_ca(sk);
80 if (cnt > 0) 80 if (cnt > 0)
81 w->rtt = tp->srtt >> 3; 81 w->rtt = tcp_sk(sk)->srtt >> 3;
82} 82}
83 83
84/* 84/*
@@ -86,9 +86,9 @@ static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt)
 86 * It updates the RTT evaluation window if this is the right moment 86 * It updates the RTT evaluation window if this is the right moment
 87 * to do so, and if so calls the filter to evaluate bandwidth. 87 * to do so, and if so calls the filter to evaluate bandwidth.
88 */ 88 */
89static void westwood_update_window(struct tcp_sock *tp) 89static void westwood_update_window(struct sock *sk)
90{ 90{
91 struct westwood *w = tcp_ca(tp); 91 struct westwood *w = inet_csk_ca(sk);
92 s32 delta = tcp_time_stamp - w->rtt_win_sx; 92 s32 delta = tcp_time_stamp - w->rtt_win_sx;
93 93
94 /* 94 /*
@@ -114,11 +114,12 @@ static void westwood_update_window(struct tcp_sock *tp)
 114 * header prediction is successful. In that case the update is 114 * header prediction is successful. In that case the update is
 115 * straightforward and doesn't need any particular care. 115 * straightforward and doesn't need any particular care.
116 */ 116 */
117static inline void westwood_fast_bw(struct tcp_sock *tp) 117static inline void westwood_fast_bw(struct sock *sk)
118{ 118{
119 struct westwood *w = tcp_ca(tp); 119 const struct tcp_sock *tp = tcp_sk(sk);
120 struct westwood *w = inet_csk_ca(sk);
120 121
121 westwood_update_window(tp); 122 westwood_update_window(sk);
122 123
123 w->bk += tp->snd_una - w->snd_una; 124 w->bk += tp->snd_una - w->snd_una;
124 w->snd_una = tp->snd_una; 125 w->snd_una = tp->snd_una;
@@ -130,9 +131,10 @@ static inline void westwood_fast_bw(struct tcp_sock *tp)
130 * This function evaluates cumul_ack for evaluating bk in case of 131 * This function evaluates cumul_ack for evaluating bk in case of
131 * delayed or partial acks. 132 * delayed or partial acks.
132 */ 133 */
133static inline u32 westwood_acked_count(struct tcp_sock *tp) 134static inline u32 westwood_acked_count(struct sock *sk)
134{ 135{
135 struct westwood *w = tcp_ca(tp); 136 const struct tcp_sock *tp = tcp_sk(sk);
137 struct westwood *w = inet_csk_ca(sk);
136 138
137 w->cumul_ack = tp->snd_una - w->snd_una; 139 w->cumul_ack = tp->snd_una - w->snd_una;
138 140
@@ -160,9 +162,10 @@ static inline u32 westwood_acked_count(struct tcp_sock *tp)
160 return w->cumul_ack; 162 return w->cumul_ack;
161} 163}
162 164
163static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp) 165static inline u32 westwood_bw_rttmin(const struct sock *sk)
164{ 166{
165 struct westwood *w = tcp_ca(tp); 167 const struct tcp_sock *tp = tcp_sk(sk);
168 const struct westwood *w = inet_csk_ca(sk);
166 return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2); 169 return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
167} 170}
168 171
@@ -172,31 +175,32 @@ static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp)
172 * in packets we use mss_cache). Rttmin is guaranteed to be >= 2 175 * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
173 * so avoids ever returning 0. 176 * so avoids ever returning 0.
174 */ 177 */
175static u32 tcp_westwood_cwnd_min(struct tcp_sock *tp) 178static u32 tcp_westwood_cwnd_min(struct sock *sk)
176{ 179{
177 return westwood_bw_rttmin(tp); 180 return westwood_bw_rttmin(sk);
178} 181}
179 182
180static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event) 183static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
181{ 184{
182 struct westwood *w = tcp_ca(tp); 185 struct tcp_sock *tp = tcp_sk(sk);
186 struct westwood *w = inet_csk_ca(sk);
183 187
184 switch(event) { 188 switch(event) {
185 case CA_EVENT_FAST_ACK: 189 case CA_EVENT_FAST_ACK:
186 westwood_fast_bw(tp); 190 westwood_fast_bw(sk);
187 break; 191 break;
188 192
189 case CA_EVENT_COMPLETE_CWR: 193 case CA_EVENT_COMPLETE_CWR:
190 tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(tp); 194 tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
191 break; 195 break;
192 196
193 case CA_EVENT_FRTO: 197 case CA_EVENT_FRTO:
194 tp->snd_ssthresh = westwood_bw_rttmin(tp); 198 tp->snd_ssthresh = westwood_bw_rttmin(sk);
195 break; 199 break;
196 200
197 case CA_EVENT_SLOW_ACK: 201 case CA_EVENT_SLOW_ACK:
198 westwood_update_window(tp); 202 westwood_update_window(sk);
199 w->bk += westwood_acked_count(tp); 203 w->bk += westwood_acked_count(sk);
200 w->rtt_min = min(w->rtt, w->rtt_min); 204 w->rtt_min = min(w->rtt, w->rtt_min);
201 break; 205 break;
202 206
@@ -208,10 +212,10 @@ static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event)
208 212
209 213
210/* Extract info for Tcp socket info provided via netlink. */ 214/* Extract info for Tcp socket info provided via netlink. */
211static void tcp_westwood_info(struct tcp_sock *tp, u32 ext, 215static void tcp_westwood_info(struct sock *sk, u32 ext,
212 struct sk_buff *skb) 216 struct sk_buff *skb)
213{ 217{
214 const struct westwood *ca = tcp_ca(tp); 218 const struct westwood *ca = inet_csk_ca(sk);
215 if (ext & (1<<(TCPDIAG_VEGASINFO-1))) { 219 if (ext & (1<<(TCPDIAG_VEGASINFO-1))) {
216 struct rtattr *rta; 220 struct rtattr *rta;
217 struct tcpvegas_info *info; 221 struct tcpvegas_info *info;
@@ -242,7 +246,7 @@ static struct tcp_congestion_ops tcp_westwood = {
242 246
243static int __init tcp_westwood_register(void) 247static int __init tcp_westwood_register(void)
244{ 248{
245 BUG_ON(sizeof(struct westwood) > TCP_CA_PRIV_SIZE); 249 BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
246 return tcp_register_congestion_control(&tcp_westwood); 250 return tcp_register_congestion_control(&tcp_westwood);
247} 251}
248 252
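
Finally, the two Westwood quantities this file keeps referring to can be restated compactly: westwood_filter() smooths bandwidth samples through two cascaded 7/8 EWMA stages (bw_ns_est feeding bw_est), and westwood_bw_rttmin() converts the bandwidth-delay product into a packet count with a floor of two. A self-contained sketch of both, same arithmetic, stand-in names:

    #include <stdint.h>

    /* One EWMA stage of the Westwood bandwidth filter: 7/8 old, 1/8 new. */
    static uint32_t ewma_7_8(uint32_t old, uint32_t sample)
    {
            return (7 * old + sample) >> 3;
    }

    /* cwnd floor after loss: bandwidth * RTTmin in packets, >= 2. */
    static uint32_t bw_rttmin_pkts(uint32_t bw_est, uint32_t rtt_min,
                                   uint32_t mss_cache)
    {
            uint32_t w = (bw_est * rtt_min) / mss_cache;
            return w > 2 ? w : 2;
    }
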