author		Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 23:10:42 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:43:19 -0400
commit		463c84b97f24010a67cd871746d6a7e4c925a5f9
tree		48df67ede4ebb5d12b3c0ae55d72531574bd51a6 /net/ipv4/tcp_timer.c
parent		87d11ceb9deb7a3f13fdee6e89d9bb6be7d27a71
[NET]: Introduce inet_connection_sock
This creates struct inet_connection_sock, moving out of struct tcp_sock
the members that are shareable with other connection-oriented INET
protocols, such as DCCP, which in my private tree already uses most of
these members.
The functions that operate on these members were renamed with an
inet_csk_ prefix but are not yet moved to a new file, so as to ease the
review of these changes.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--	net/ipv4/tcp_timer.c	165
1 file changed, 87 insertions(+), 78 deletions(-)
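
For orientation before the diff: the icsk_ members referenced throughout this patch suggest roughly the following shape for the new structure. This is a reader's sketch reconstructed only from the fields the patch touches, with guessed types; the authoritative definition lives in include/net/inet_connection_sock.h.

	/* Sketch only -- reconstructed from the members used in this diff. */
	struct inet_connection_sock {
		struct inet_sock	  icsk_inet;		/* must come first: inet_csk() casts sk */
		struct request_sock_queue icsk_accept_queue;	/* was tp->accept_queue */
		struct timer_list	  icsk_retransmit_timer; /* was tp->retransmit_timer */
		struct timer_list	  icsk_delack_timer;	/* was tp->delack_timer */
		__u32			  icsk_rto;		/* was tp->rto */
		unsigned long		  icsk_timeout;		/* was tp->timeout */
		__u8			  icsk_retransmits;	/* was tp->retransmits */
		__u8			  icsk_pending;		/* was tp->pending */
		__u8			  icsk_backoff;		/* was tp->backoff */
		__u8			  icsk_syn_retries;	/* was tp->syn_retries */
		struct {
			__u8		  pending;	/* ACK events pending */
			__u8		  blocked;	/* delack blocked by socket lock */
			__u8		  pingpong;	/* interactive session */
			__u32		  ato;		/* predicted soft-clock tick */
			unsigned long	  timeout;	/* scheduled delack timeout */
		} icsk_ack;				/* was tp->ack */
	};

	static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
	{
		return (struct inet_connection_sock *)sk;
	}
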
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0084227438c2..0b71380ee42f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -36,9 +36,9 @@ static void tcp_write_timer(unsigned long);
 static void tcp_delack_timer(unsigned long);
 static void tcp_keepalive_timer (unsigned long data);
 
-#ifdef TCP_DEBUG
-const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
-EXPORT_SYMBOL(tcp_timer_bug_msg);
+#ifdef INET_CSK_DEBUG
+const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
+EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
 /*
@@ -46,40 +46,45 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
  * We may wish use just one timer maintaining a list of expire jiffies
  * to optimize.
  */
-
-void tcp_init_xmit_timers(struct sock *sk)
+void inet_csk_init_xmit_timers(struct sock *sk,
+			       void (*retransmit_handler)(unsigned long),
+			       void (*delack_handler)(unsigned long),
+			       void (*keepalive_handler)(unsigned long))
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
-	init_timer(&tp->retransmit_timer);
-	tp->retransmit_timer.function=&tcp_write_timer;
-	tp->retransmit_timer.data = (unsigned long) sk;
-	tp->pending = 0;
+	init_timer(&icsk->icsk_retransmit_timer);
+	init_timer(&icsk->icsk_delack_timer);
+	init_timer(&sk->sk_timer);
 
-	init_timer(&tp->delack_timer);
-	tp->delack_timer.function=&tcp_delack_timer;
-	tp->delack_timer.data = (unsigned long) sk;
-	tp->ack.pending = 0;
+	icsk->icsk_retransmit_timer.function = retransmit_handler;
+	icsk->icsk_delack_timer.function = delack_handler;
+	sk->sk_timer.function = keepalive_handler;
 
-	init_timer(&sk->sk_timer);
-	sk->sk_timer.function = &tcp_keepalive_timer;
-	sk->sk_timer.data = (unsigned long)sk;
+	icsk->icsk_retransmit_timer.data =
+		icsk->icsk_delack_timer.data =
+			sk->sk_timer.data = (unsigned long)sk;
+
+	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 }
 
-void tcp_clear_xmit_timers(struct sock *sk)
+void inet_csk_clear_xmit_timers(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
-	tp->pending = 0;
-	sk_stop_timer(sk, &tp->retransmit_timer);
-
-	tp->ack.pending = 0;
-	tp->ack.blocked = 0;
-	sk_stop_timer(sk, &tp->delack_timer);
+	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
 
+	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+	sk_stop_timer(sk, &icsk->icsk_delack_timer);
 	sk_stop_timer(sk, &sk->sk_timer);
 }
 
+void tcp_init_xmit_timers(struct sock *sk)
+{
+	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+				  &tcp_keepalive_timer);
+}
+
 static void tcp_write_err(struct sock *sk)
 {
 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
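
The point of the handler arguments is that protocols other than TCP can now plug their own timer callbacks into the shared machinery. A hypothetical sketch of how DCCP might do the same, mirroring the tcp_init_xmit_timers() wrapper above (the dccp_* handler names are illustrative, not part of this patch):

	static void dccp_init_xmit_timers(struct sock *sk)
	{
		inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
					  &dccp_keepalive_timer);
	}
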
@@ -155,15 +160,15 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
 /* A write timeout has occurred. Process the after effects. */
 static int tcp_write_timeout(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	int retry_until;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-		if (tp->retransmits)
+		if (icsk->icsk_retransmits)
 			dst_negative_advice(&sk->sk_dst_cache);
-		retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
+		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 	} else {
-		if (tp->retransmits >= sysctl_tcp_retries1) {
+		if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
 			/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
 			   hole detection. :-(
 
@@ -189,16 +194,16 @@ static int tcp_write_timeout(struct sock *sk)
 
 		retry_until = sysctl_tcp_retries2;
 		if (sock_flag(sk, SOCK_DEAD)) {
-			int alive = (tp->rto < TCP_RTO_MAX);
+			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
 
 			retry_until = tcp_orphan_retries(sk, alive);
 
-			if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until))
+			if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
 				return 1;
 		}
 	}
 
-	if (tp->retransmits >= retry_until) {
+	if (icsk->icsk_retransmits >= retry_until) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
 		return 1;
@@ -210,26 +215,27 @@ static void tcp_delack_timer(unsigned long data)
 {
 	struct sock *sk = (struct sock*)data;
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
-		tp->ack.blocked = 1;
+		icsk->icsk_ack.blocked = 1;
 		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
-		sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
 		goto out_unlock;
 	}
 
 	sk_stream_mem_reclaim(sk);
 
-	if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
+	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
 
-	if (time_after(tp->ack.timeout, jiffies)) {
-		sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
+	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
 		goto out;
 	}
-	tp->ack.pending &= ~TCP_ACK_TIMER;
+	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
 
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
@@ -242,16 +248,16 @@ static void tcp_delack_timer(unsigned long data)
 		tp->ucopy.memory = 0;
 	}
 
-	if (tcp_ack_scheduled(tp)) {
-		if (!tp->ack.pingpong) {
+	if (inet_csk_ack_scheduled(sk)) {
+		if (!icsk->icsk_ack.pingpong) {
 			/* Delayed ACK missed: inflate ATO. */
-			tp->ack.ato = min(tp->ack.ato << 1, tp->rto);
+			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
 		} else {
 			/* Delayed ACK missed: leave pingpong mode and
 			 * deflate ATO.
 			 */
-			tp->ack.pingpong = 0;
-			tp->ack.ato = TCP_ATO_MIN;
+			icsk->icsk_ack.pingpong = 0;
+			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
 		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
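
inet_csk_ack_scheduled() replaces tcp_ack_scheduled() and takes the socket rather than the tcp_sock. A plausible one-line sketch, assuming an ICSK_ACK_SCHED flag analogous to the old TCP_ACK_SCHED (the real helper is in include/net/inet_connection_sock.h):

	static inline int inet_csk_ack_scheduled(const struct sock *sk)
	{
		return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
	}
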
@@ -294,7 +300,8 @@ static void tcp_probe_timer(struct sock *sk)
 	max_probes = sysctl_tcp_retries2;
 
 	if (sock_flag(sk, SOCK_DEAD)) {
-		int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX);
+		const struct inet_connection_sock *icsk = inet_csk(sk);
+		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
 
 		max_probes = tcp_orphan_retries(sk, alive);
 
@@ -317,6 +324,7 @@ static void tcp_probe_timer(struct sock *sk)
 static void tcp_retransmit_timer(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (!tp->packets_out)
 		goto out;
@@ -351,7 +359,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 	if (tcp_write_timeout(sk))
 		goto out;
 
-	if (tp->retransmits == 0) {
+	if (icsk->icsk_retransmits == 0) {
 		if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
 			if (tp->rx_opt.sack_ok) {
 				if (tp->ca_state == TCP_CA_Recovery)
@@ -381,10 +389,10 @@ static void tcp_retransmit_timer(struct sock *sk)
 		/* Retransmission failed because of local congestion,
 		 * do not backoff.
 		 */
-		if (!tp->retransmits)
-			tp->retransmits=1;
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
-				     min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
+		if (!icsk->icsk_retransmits)
+			icsk->icsk_retransmits = 1;
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL));
 		goto out;
 	}
 
@@ -403,13 +411,13 @@ static void tcp_retransmit_timer(struct sock *sk)
 	 * implemented ftp to mars will work nicely. We will have to fix
 	 * the 120 second clamps though!
 	 */
-	tp->backoff++;
-	tp->retransmits++;
+	icsk->icsk_backoff++;
+	icsk->icsk_retransmits++;
 
 out_reset_timer:
-	tp->rto = min(tp->rto << 1, TCP_RTO_MAX);
-	tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
-	if (tp->retransmits > sysctl_tcp_retries1)
+	icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto);
+	if (icsk->icsk_retransmits > sysctl_tcp_retries1)
 		__sk_dst_reset(sk);
 
 out:;
@@ -418,32 +426,32 @@ out:;
 static void tcp_write_timer(unsigned long data)
 {
 	struct sock *sk = (struct sock*)data;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
 		/* Try again later */
-		sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20));
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
 		goto out_unlock;
 	}
 
-	if (sk->sk_state == TCP_CLOSE || !tp->pending)
+	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
 		goto out;
 
-	if (time_after(tp->timeout, jiffies)) {
-		sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
+	if (time_after(icsk->icsk_timeout, jiffies)) {
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
 		goto out;
 	}
 
-	event = tp->pending;
-	tp->pending = 0;
+	event = icsk->icsk_pending;
+	icsk->icsk_pending = 0;
 
 	switch (event) {
-	case TCP_TIME_RETRANS:
+	case ICSK_TIME_RETRANS:
 		tcp_retransmit_timer(sk);
 		break;
-	case TCP_TIME_PROBE0:
+	case ICSK_TIME_PROBE0:
 		tcp_probe_timer(sk);
 		break;
 	}
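
Note that a single timer, icsk_retransmit_timer, multiplexes both events: the arming site records which one is due in icsk_pending, and the handler above dispatches on it. A usage sketch for the probe case, approximating what a caller such as tcp_send_probe0() would now do (hypothetical call site, not part of this hunk):

	/* Arm the shared retransmit timer for a zero-window probe;
	 * icsk_pending remembers which event is due.
	 */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				  min(icsk->icsk_rto << icsk->icsk_backoff,
				      TCP_RTO_MAX));
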
@@ -463,8 +471,9 @@ out_unlock:
 static void tcp_synack_timer(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct listen_sock *lopt = tp->accept_queue.listen_opt;
-	int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
+	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
 	int thresh = max_retries;
 	unsigned long now = jiffies;
 	struct request_sock **reqp, *req;
@@ -526,8 +535,8 @@ static void tcp_synack_timer(struct sock *sk)
 			}
 
 			/* Drop this request */
-			tcp_synq_unlink(tp, req, reqp);
-			reqsk_queue_removed(&tp->accept_queue, req);
+			inet_csk_reqsk_queue_unlink(sk, req, reqp);
+			reqsk_queue_removed(&icsk->icsk_accept_queue, req);
 			reqsk_free(req);
 			continue;
 		}
@@ -541,15 +550,15 @@ static void tcp_synack_timer(struct sock *sk)
 	lopt->clock_hand = i;
 
 	if (lopt->qlen)
-		tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
+		inet_csk_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
 }
 
-void tcp_delete_keepalive_timer (struct sock *sk)
+void inet_csk_delete_keepalive_timer(struct sock *sk)
 {
 	sk_stop_timer(sk, &sk->sk_timer);
 }
 
-void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 {
 	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
 }
@@ -560,9 +569,9 @@ void tcp_set_keepalive(struct sock *sk, int val)
 		return;
 
 	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
-		tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
+		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
 	else if (!val)
-		tcp_delete_keepalive_timer(sk);
+		inet_csk_delete_keepalive_timer(sk);
 }
 
 
@@ -576,7 +585,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
-		tcp_reset_keepalive_timer (sk, HZ/20);
+		inet_csk_reset_keepalive_timer (sk, HZ/20);
 		goto out;
 	}
 
@@ -587,7 +596,7 @@ static void tcp_keepalive_timer (unsigned long data)
 
 	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
 		if (tp->linger2 >= 0) {
-			int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;
+			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
 
 			if (tmo > 0) {
 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
@@ -634,7 +643,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	sk_stream_mem_reclaim(sk);
 
 resched:
-	tcp_reset_keepalive_timer (sk, elapsed);
+	inet_csk_reset_keepalive_timer (sk, elapsed);
 	goto out;
 
 death:
@@ -645,7 +654,7 @@ out:
 	sock_put(sk);
 }
 
-EXPORT_SYMBOL(tcp_clear_xmit_timers);
-EXPORT_SYMBOL(tcp_delete_keepalive_timer);
+EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
+EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
 EXPORT_SYMBOL(tcp_init_xmit_timers);
-EXPORT_SYMBOL(tcp_reset_keepalive_timer);
+EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);