author     Steve French <sfrench@us.ibm.com>    2005-08-30 14:33:26 -0400
committer  Steve French <sfrench@us.ibm.com>    2005-08-30 14:33:26 -0400
commit     2016ef789a9ded2e169ad1c028ae3deb5302571f (patch)
tree       601359f15b42d4d9868b4eadfe909a7bef6435c5 /net/ipv4/tcp_timer.c
parent     7f57356b70dda014ef269135942426e4a852023e (diff)
parent     6b39374a27eb4be7e9d82145ae270ba02ea90dc8 (diff)

Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--  net/ipv4/tcp_timer.c | 253
1 file changed, 65 insertions, 188 deletions
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0084227438c2..415ee47ac1c5 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -36,49 +36,13 @@ static void tcp_write_timer(unsigned long);
 static void tcp_delack_timer(unsigned long);
 static void tcp_keepalive_timer (unsigned long data);
 
-#ifdef TCP_DEBUG
-const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
-EXPORT_SYMBOL(tcp_timer_bug_msg);
-#endif
-
-/*
- * Using different timers for retransmit, delayed acks and probes
- * We may wish use just one timer maintaining a list of expire jiffies
- * to optimize.
- */
-
 void tcp_init_xmit_timers(struct sock *sk)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-
-        init_timer(&tp->retransmit_timer);
-        tp->retransmit_timer.function=&tcp_write_timer;
-        tp->retransmit_timer.data = (unsigned long) sk;
-        tp->pending = 0;
-
-        init_timer(&tp->delack_timer);
-        tp->delack_timer.function=&tcp_delack_timer;
-        tp->delack_timer.data = (unsigned long) sk;
-        tp->ack.pending = 0;
-
-        init_timer(&sk->sk_timer);
-        sk->sk_timer.function = &tcp_keepalive_timer;
-        sk->sk_timer.data = (unsigned long)sk;
+        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+                                  &tcp_keepalive_timer);
 }
 
-void tcp_clear_xmit_timers(struct sock *sk)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-
-        tp->pending = 0;
-        sk_stop_timer(sk, &tp->retransmit_timer);
-
-        tp->ack.pending = 0;
-        tp->ack.blocked = 0;
-        sk_stop_timer(sk, &tp->delack_timer);
-
-        sk_stop_timer(sk, &sk->sk_timer);
-}
+EXPORT_SYMBOL(tcp_init_xmit_timers);
 
 static void tcp_write_err(struct sock *sk)
 {
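
Note on the hunk above: the three hand-rolled init_timer() blocks collapse into the single inet_csk_init_xmit_timers() call, and tcp_clear_xmit_timers() disappears in favour of a generic inet_connection_sock helper. The new helper's body is not part of this diff; the following is a minimal sketch, assuming it simply replays the removed wiring on the icsk_retransmit_timer and icsk_delack_timer fields that the rest of this diff uses (kernel context assumed, not a quote of net/ipv4/inet_connection_sock.c):

void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(unsigned long),
                               void (*delack_handler)(unsigned long),
                               void (*keepalive_handler)(unsigned long))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        /* Same wiring the removed TCP-private code did, once per timer:
         * the socket rides along as the timer callback's data argument. */
        init_timer(&icsk->icsk_retransmit_timer);
        icsk->icsk_retransmit_timer.function = retransmit_handler;
        icsk->icsk_retransmit_timer.data = (unsigned long)sk;

        init_timer(&icsk->icsk_delack_timer);
        icsk->icsk_delack_timer.function = delack_handler;
        icsk->icsk_delack_timer.data = (unsigned long)sk;

        init_timer(&sk->sk_timer);
        sk->sk_timer.function = keepalive_handler;
        sk->sk_timer.data = (unsigned long)sk;

        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}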
@@ -155,15 +119,15 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
 /* A write timeout has occurred. Process the after effects. */
 static int tcp_write_timeout(struct sock *sk)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
+        const struct inet_connection_sock *icsk = inet_csk(sk);
         int retry_until;
 
         if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-                if (tp->retransmits)
+                if (icsk->icsk_retransmits)
                         dst_negative_advice(&sk->sk_dst_cache);
-                retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
+                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
         } else {
-                if (tp->retransmits >= sysctl_tcp_retries1) {
+                if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
                         /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
                            hole detection. :-(
 
@@ -189,16 +153,16 @@ static int tcp_write_timeout(struct sock *sk)
 
                 retry_until = sysctl_tcp_retries2;
                 if (sock_flag(sk, SOCK_DEAD)) {
-                        int alive = (tp->rto < TCP_RTO_MAX);
+                        const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
 
                         retry_until = tcp_orphan_retries(sk, alive);
 
-                        if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until))
+                        if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
                                 return 1;
                 }
         }
 
-        if (tp->retransmits >= retry_until) {
+        if (icsk->icsk_retransmits >= retry_until) {
                 /* Has it gone just too far? */
                 tcp_write_err(sk);
                 return 1;
@@ -210,26 +174,27 @@ static void tcp_delack_timer(unsigned long data)
 {
         struct sock *sk = (struct sock*)data;
         struct tcp_sock *tp = tcp_sk(sk);
+        struct inet_connection_sock *icsk = inet_csk(sk);
 
         bh_lock_sock(sk);
         if (sock_owned_by_user(sk)) {
                 /* Try again later. */
-                tp->ack.blocked = 1;
+                icsk->icsk_ack.blocked = 1;
                 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
-                sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
+                sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
                 goto out_unlock;
         }
 
         sk_stream_mem_reclaim(sk);
 
-        if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
+        if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                 goto out;
 
-        if (time_after(tp->ack.timeout, jiffies)) {
-                sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
+        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+                sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                 goto out;
         }
-        tp->ack.pending &= ~TCP_ACK_TIMER;
+        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
 
         if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                 struct sk_buff *skb;
@@ -242,16 +207,16 @@ static void tcp_delack_timer(unsigned long data)
                 tp->ucopy.memory = 0;
         }
 
-        if (tcp_ack_scheduled(tp)) {
-                if (!tp->ack.pingpong) {
+        if (inet_csk_ack_scheduled(sk)) {
+                if (!icsk->icsk_ack.pingpong) {
                         /* Delayed ACK missed: inflate ATO. */
-                        tp->ack.ato = min(tp->ack.ato << 1, tp->rto);
+                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                 } else {
                         /* Delayed ACK missed: leave pingpong mode and
                          * deflate ATO.
                          */
-                        tp->ack.pingpong = 0;
-                        tp->ack.ato = TCP_ATO_MIN;
+                        icsk->icsk_ack.pingpong = 0;
+                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                 }
                 tcp_send_ack(sk);
                 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
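
Note on the hunk above: the ATO update is a doubling-with-clamp, i.e. every missed delayed ACK doubles the ACK timeout but never past the current RTO. A standalone toy of the arithmetic follows; the 200ms RTO and HZ=1000 are assumed example values (TCP_ATO_MIN mirrors the kernel's HZ/25):

/* Toy model of the delayed-ACK ATO inflation above. */
#include <stdio.h>

#define HZ          1000
#define TCP_ATO_MIN (HZ / 25)           /* 40ms at HZ=1000 */

static unsigned int inflate_ato(unsigned int ato, unsigned int rto)
{
        /* icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); */
        return (ato << 1) < rto ? ato << 1 : rto;
}

int main(void)
{
        unsigned int ato = TCP_ATO_MIN, rto = HZ / 5;   /* assumed 200ms RTO */

        for (int miss = 1; miss <= 4; miss++) {
                ato = inflate_ato(ato, rto);
                printf("after miss %d: ato = %ums\n", miss, ato);
        }
        /* Prints 80, 160, 200, 200 -- clamped at the RTO. */
        return 0;
}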
@@ -268,11 +233,12 @@ out_unlock:
 
 static void tcp_probe_timer(struct sock *sk)
 {
+        struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         int max_probes;
 
         if (tp->packets_out || !sk->sk_send_head) {
-                tp->probes_out = 0;
+                icsk->icsk_probes_out = 0;
                 return;
         }
 
@@ -283,7 +249,7 @@ static void tcp_probe_timer(struct sock *sk)
          * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
          * this behaviour in Solaris down as a bug fix. [AC]
          *
-         * Let me to explain. probes_out is zeroed by incoming ACKs
+         * Let me to explain. icsk_probes_out is zeroed by incoming ACKs
          * even if they advertise zero window. Hence, connection is killed only
          * if we received no ACKs for normal connection timeout. It is not killed
          * only because window stays zero for some time, window may be zero
@@ -294,15 +260,15 @@ static void tcp_probe_timer(struct sock *sk)
         max_probes = sysctl_tcp_retries2;
 
         if (sock_flag(sk, SOCK_DEAD)) {
-                int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX);
+                const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
 
                 max_probes = tcp_orphan_retries(sk, alive);
 
-                if (tcp_out_of_resources(sk, alive || tp->probes_out <= max_probes))
+                if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
                         return;
         }
 
-        if (tp->probes_out > max_probes) {
+        if (icsk->icsk_probes_out > max_probes) {
                 tcp_write_err(sk);
         } else {
                 /* Only send another probe if we didn't close things up. */
@@ -317,6 +283,7 @@ static void tcp_probe_timer(struct sock *sk)
 static void tcp_retransmit_timer(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
+        struct inet_connection_sock *icsk = inet_csk(sk);
 
         if (!tp->packets_out)
                 goto out;
@@ -351,20 +318,21 @@ static void tcp_retransmit_timer(struct sock *sk)
         if (tcp_write_timeout(sk))
                 goto out;
 
-        if (tp->retransmits == 0) {
-                if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
+        if (icsk->icsk_retransmits == 0) {
+                if (icsk->icsk_ca_state == TCP_CA_Disorder ||
+                    icsk->icsk_ca_state == TCP_CA_Recovery) {
                         if (tp->rx_opt.sack_ok) {
-                                if (tp->ca_state == TCP_CA_Recovery)
+                                if (icsk->icsk_ca_state == TCP_CA_Recovery)
                                         NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
                                 else
                                         NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
                         } else {
-                                if (tp->ca_state == TCP_CA_Recovery)
+                                if (icsk->icsk_ca_state == TCP_CA_Recovery)
                                         NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
                                 else
                                         NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
                         }
-                } else if (tp->ca_state == TCP_CA_Loss) {
+                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                         NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
                 } else {
                         NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
@@ -381,10 +349,11 @@ static void tcp_retransmit_timer(struct sock *sk)
                 /* Retransmission failed because of local congestion,
                  * do not backoff.
                  */
-                if (!tp->retransmits)
-                        tp->retransmits=1;
-                tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
-                                     min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
+                if (!icsk->icsk_retransmits)
+                        icsk->icsk_retransmits = 1;
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                          min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
+                                          TCP_RTO_MAX);
                 goto out;
         }
 
@@ -403,13 +372,13 @@ static void tcp_retransmit_timer(struct sock *sk)
          * implemented ftp to mars will work nicely. We will have to fix
          * the 120 second clamps though!
          */
-        tp->backoff++;
-        tp->retransmits++;
+        icsk->icsk_backoff++;
+        icsk->icsk_retransmits++;
 
 out_reset_timer:
-        tp->rto = min(tp->rto << 1, TCP_RTO_MAX);
-        tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
-        if (tp->retransmits > sysctl_tcp_retries1)
+        icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
+        if (icsk->icsk_retransmits > sysctl_tcp_retries1)
                 __sk_dst_reset(sk);
 
 out:;
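
Note on the hunk above: out_reset_timer is plain exponential backoff; the RTO doubles on every timer expiry and is clamped at TCP_RTO_MAX, the "120 second clamps" the in-code comment complains about (TCP_RTO_MAX is 120*HZ). A standalone sketch of the resulting series, starting from an assumed 200ms initial RTO:

/* Sketch of the RTO doubling in out_reset_timer above. */
#include <stdio.h>

#define HZ          1000
#define TCP_RTO_MAX (120 * HZ)

int main(void)
{
        unsigned long rto = HZ / 5;     /* assumed initial RTO: 200ms */
        int retransmits = 0;

        while (rto < TCP_RTO_MAX) {
                /* icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); */
                rto = (rto << 1) < TCP_RTO_MAX ? rto << 1 : TCP_RTO_MAX;
                printf("retransmit %2d: rto = %lums\n", ++retransmits, rto);
        }
        /* 400, 800, ... 102400, then pinned at 120000 (two minutes). */
        return 0;
}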
@@ -418,32 +387,32 @@ out:;
 static void tcp_write_timer(unsigned long data)
 {
         struct sock *sk = (struct sock*)data;
-        struct tcp_sock *tp = tcp_sk(sk);
+        struct inet_connection_sock *icsk = inet_csk(sk);
         int event;
 
         bh_lock_sock(sk);
         if (sock_owned_by_user(sk)) {
                 /* Try again later */
-                sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20));
+                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
                 goto out_unlock;
         }
 
-        if (sk->sk_state == TCP_CLOSE || !tp->pending)
+        if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
                 goto out;
 
-        if (time_after(tp->timeout, jiffies)) {
-                sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
+        if (time_after(icsk->icsk_timeout, jiffies)) {
+                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                 goto out;
         }
 
-        event = tp->pending;
-        tp->pending = 0;
+        event = icsk->icsk_pending;
+        icsk->icsk_pending = 0;
 
         switch (event) {
-        case TCP_TIME_RETRANS:
+        case ICSK_TIME_RETRANS:
                 tcp_retransmit_timer(sk);
                 break;
-        case TCP_TIME_PROBE0:
+        case ICSK_TIME_PROBE0:
                 tcp_probe_timer(sk);
                 break;
         }
@@ -462,96 +431,8 @@ out_unlock:
 
 static void tcp_synack_timer(struct sock *sk)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct listen_sock *lopt = tp->accept_queue.listen_opt;
-        int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
-        int thresh = max_retries;
-        unsigned long now = jiffies;
-        struct request_sock **reqp, *req;
-        int i, budget;
-
-        if (lopt == NULL || lopt->qlen == 0)
-                return;
-
-        /* Normally all the openreqs are young and become mature
-         * (i.e. converted to established socket) for first timeout.
-         * If synack was not acknowledged for 3 seconds, it means
-         * one of the following things: synack was lost, ack was lost,
-         * rtt is high or nobody planned to ack (i.e. synflood).
-         * When server is a bit loaded, queue is populated with old
-         * open requests, reducing effective size of queue.
-         * When server is well loaded, queue size reduces to zero
-         * after several minutes of work. It is not synflood,
-         * it is normal operation. The solution is pruning
-         * too old entries overriding normal timeout, when
-         * situation becomes dangerous.
-         *
-         * Essentially, we reserve half of room for young
-         * embrions; and abort old ones without pity, if old
-         * ones are about to clog our table.
-         */
-        if (lopt->qlen>>(lopt->max_qlen_log-1)) {
-                int young = (lopt->qlen_young<<1);
-
-                while (thresh > 2) {
-                        if (lopt->qlen < young)
-                                break;
-                        thresh--;
-                        young <<= 1;
-                }
-        }
-
-        if (tp->defer_accept)
-                max_retries = tp->defer_accept;
-
-        budget = 2*(TCP_SYNQ_HSIZE/(TCP_TIMEOUT_INIT/TCP_SYNQ_INTERVAL));
-        i = lopt->clock_hand;
-
-        do {
-                reqp=&lopt->syn_table[i];
-                while ((req = *reqp) != NULL) {
-                        if (time_after_eq(now, req->expires)) {
-                                if ((req->retrans < thresh ||
-                                     (inet_rsk(req)->acked && req->retrans < max_retries))
-                                    && !req->rsk_ops->rtx_syn_ack(sk, req, NULL)) {
-                                        unsigned long timeo;
-
-                                        if (req->retrans++ == 0)
-                                                lopt->qlen_young--;
-                                        timeo = min((TCP_TIMEOUT_INIT << req->retrans),
-                                                    TCP_RTO_MAX);
-                                        req->expires = now + timeo;
-                                        reqp = &req->dl_next;
-                                        continue;
-                                }
-
-                                /* Drop this request */
-                                tcp_synq_unlink(tp, req, reqp);
-                                reqsk_queue_removed(&tp->accept_queue, req);
-                                reqsk_free(req);
-                                continue;
-                        }
-                        reqp = &req->dl_next;
-                }
-
-                i = (i+1)&(TCP_SYNQ_HSIZE-1);
-
-        } while (--budget > 0);
-
-        lopt->clock_hand = i;
-
-        if (lopt->qlen)
-                tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
-}
-
-void tcp_delete_keepalive_timer (struct sock *sk)
-{
-        sk_stop_timer(sk, &sk->sk_timer);
-}
-
-void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
-{
-        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
-}
+        inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
+                                   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 }
 
 void tcp_set_keepalive(struct sock *sk, int val)
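
Note on the hunk above: tcp_synack_timer()'s body moves wholesale into the generic inet_csk_reqsk_queue_prune(). The subtlest part of the removed loop is the "reserve half of room for young embrions" block, where the SYN-ACK retry threshold shrinks as old requests crowd out young ones. A standalone sketch of just that threshold computation; the queue numbers in main() are made-up examples:

/* Sketch of the adaptive retry threshold from the removed loop. */
#include <stdio.h>

static int synq_thresh(int qlen, int qlen_young, int max_qlen_log,
                       int max_retries)
{
        int thresh = max_retries;

        /* Only adapt once the queue is more than half full. */
        if (qlen >> (max_qlen_log - 1)) {
                int young = qlen_young << 1;

                /* Each time old entries outnumber the doubled young
                 * share, give up one retry (floor of 2). */
                while (thresh > 2) {
                        if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
        return thresh;
}

int main(void)
{
        /* 1024-slot queue (max_qlen_log = 10), 700 queued, 100 young. */
        printf("thresh = %d\n", synq_thresh(700, 100, 10, 5));
        /* young: 200 -> 400 -> 800 > 700, so thresh drops 5 -> 3. */
        return 0;
}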
@@ -560,15 +441,16 @@ void tcp_set_keepalive(struct sock *sk, int val)
                 return;
 
         if (val && !sock_flag(sk, SOCK_KEEPOPEN))
-                tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
+                inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
         else if (!val)
-                tcp_delete_keepalive_timer(sk);
+                inet_csk_delete_keepalive_timer(sk);
 }
 
 
 static void tcp_keepalive_timer (unsigned long data)
 {
         struct sock *sk = (struct sock *) data;
+        struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         __u32 elapsed;
 
@@ -576,7 +458,7 @@ static void tcp_keepalive_timer (unsigned long data)
         bh_lock_sock(sk);
         if (sock_owned_by_user(sk)) {
                 /* Try again later. */
-                tcp_reset_keepalive_timer (sk, HZ/20);
+                inet_csk_reset_keepalive_timer (sk, HZ/20);
                 goto out;
         }
 
@@ -587,7 +469,7 @@ static void tcp_keepalive_timer (unsigned long data)
 
         if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                 if (tp->linger2 >= 0) {
-                        int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;
+                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
 
                         if (tmo > 0) {
                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
@@ -610,14 +492,14 @@ static void tcp_keepalive_timer (unsigned long data)
         elapsed = tcp_time_stamp - tp->rcv_tstamp;
 
         if (elapsed >= keepalive_time_when(tp)) {
-                if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) ||
-                    (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) {
+                if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) ||
+                    (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) {
                         tcp_send_active_reset(sk, GFP_ATOMIC);
                         tcp_write_err(sk);
                         goto out;
                 }
                 if (tcp_write_wakeup(sk) <= 0) {
-                        tp->probes_out++;
+                        icsk->icsk_probes_out++;
                         elapsed = keepalive_intvl_when(tp);
                 } else {
                         /* If keepalive was lost due to local congestion,
@@ -634,7 +516,7 @@ static void tcp_keepalive_timer (unsigned long data)
         sk_stream_mem_reclaim(sk);
 
 resched:
-        tcp_reset_keepalive_timer (sk, elapsed);
+        inet_csk_reset_keepalive_timer (sk, elapsed);
         goto out;
 
 death:
@@ -644,8 +526,3 @@ out:
         bh_unlock_sock(sk);
         sock_put(sk);
 }
-
-EXPORT_SYMBOL(tcp_clear_xmit_timers);
-EXPORT_SYMBOL(tcp_delete_keepalive_timer);
-EXPORT_SYMBOL(tcp_init_xmit_timers);
-EXPORT_SYMBOL(tcp_reset_keepalive_timer);