diff options
author | Arnaldo Carvalho de Melo <acme@mandriva.com> | 2005-08-09 23:44:40 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:55:48 -0400 |
commit | 295ff7edb8f72b77d524759266f7524deae379b3 (patch) | |
tree | e16e99e324444fb01ae3dfd221b5d47a88acfeb8 /net/ipv4/tcp_ipv4.c | |
parent | 0b4e03bf0bc43ad6250a1e2fa25fc3eb2b028977 (diff) |
[TIMEWAIT]: Introduce inet_timewait_death_row
That groups all of the tables and variables associated with the TCP timewait
scheduling/recycling/killing code, which can now be isolated from the TCP
specific code and used by other transport protocols, such as DCCP.
Next changeset will move this code to net/ipv4/inet_timewait_sock.c
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 11 |
1 files changed, 6 insertions, 5 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b966102b9f39..83f72346274a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -199,7 +199,7 @@ unique: | |||
199 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | 199 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); |
200 | } else if (tw) { | 200 | } else if (tw) { |
201 | /* Silly. Should hash-dance instead... */ | 201 | /* Silly. Should hash-dance instead... */ |
202 | tcp_tw_deschedule(tw); | 202 | inet_twsk_deschedule(tw, &tcp_death_row); |
203 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | 203 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); |
204 | 204 | ||
205 | inet_twsk_put(tw); | 205 | inet_twsk_put(tw); |
@@ -291,7 +291,7 @@ ok: | |||
291 | spin_unlock(&head->lock); | 291 | spin_unlock(&head->lock); |
292 | 292 | ||
293 | if (tw) { | 293 | if (tw) { |
294 | tcp_tw_deschedule(tw); | 294 | inet_twsk_deschedule(tw, &tcp_death_row);; |
295 | inet_twsk_put(tw); | 295 | inet_twsk_put(tw); |
296 | } | 296 | } |
297 | 297 | ||
@@ -366,7 +366,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
366 | tp->write_seq = 0; | 366 | tp->write_seq = 0; |
367 | } | 367 | } |
368 | 368 | ||
369 | if (sysctl_tcp_tw_recycle && | 369 | if (tcp_death_row.sysctl_tw_recycle && |
370 | !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { | 370 | !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { |
371 | struct inet_peer *peer = rt_get_peer(rt); | 371 | struct inet_peer *peer = rt_get_peer(rt); |
372 | 372 | ||
@@ -965,7 +965,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
965 | * are made in the function processing timewait state. | 965 | * are made in the function processing timewait state. |
966 | */ | 966 | */ |
967 | if (tmp_opt.saw_tstamp && | 967 | if (tmp_opt.saw_tstamp && |
968 | sysctl_tcp_tw_recycle && | 968 | tcp_death_row.sysctl_tw_recycle && |
969 | (dst = inet_csk_route_req(sk, req)) != NULL && | 969 | (dst = inet_csk_route_req(sk, req)) != NULL && |
970 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && | 970 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && |
971 | peer->v4daddr == saddr) { | 971 | peer->v4daddr == saddr) { |
@@ -1305,7 +1305,8 @@ do_time_wait: | |||
1305 | ntohs(th->dest), | 1305 | ntohs(th->dest), |
1306 | inet_iif(skb)); | 1306 | inet_iif(skb)); |
1307 | if (sk2) { | 1307 | if (sk2) { |
1308 | tcp_tw_deschedule((struct inet_timewait_sock *)sk); | 1308 | inet_twsk_deschedule((struct inet_timewait_sock *)sk, |
1309 | &tcp_death_row); | ||
1309 | inet_twsk_put((struct inet_timewait_sock *)sk); | 1310 | inet_twsk_put((struct inet_timewait_sock *)sk); |
1310 | sk = sk2; | 1311 | sk = sk2; |
1311 | goto process; | 1312 | goto process; |