author     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>   2005-08-09 23:09:30 -0400
committer  David S. Miller <davem@sunset.davemloft.net>         2005-08-29 18:42:13 -0400
commit     8feaf0c0a5488b3d898a9c207eb6678f44ba3f26 (patch)
tree       ddd004afe2f7c8295f6fdb94d34f78a42b5961cb /net/ipv4/tcp_minisocks.c
parent     33b62231908c58ae04185e4f1063d1e35a7c8576 (diff)
[INET]: Generalise tcp_tw_bucket, aka TIME_WAIT sockets
This paves the way to generalise the rest of the sock ID lookup
routines and saves some bytes in TCPv4 TIME_WAIT sockets on distro
kernels (where IPv6 is always built as a module):
[root@qemu ~]# grep tw_sock /proc/slabinfo
tw_sock_TCPv6 0 0 128 31 1
tw_sock_TCP 0 0 96 41 1
[root@qemu ~]#
Now if a protocol wants to use the generic TIME_WAIT infrastructure it
only has to set the sk_prot->twsk_obj_size field to the size of its
inet_timewait_sock derived sock, and proto_register will create
sk_prot->twsk_slab. For now this is only for INET sockets, but we can
introduce timewait_sock later if some non-INET transport protocol
wants to use this stuff.
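For illustration, this is roughly what the TCP side looks like after
this patch (a sketch only: struct tcp_timewait_sock is abridged to the
fields the diff below actually touches, tcp_prot is reduced to the
relevant initializers, and the twsk_slab creation itself happens inside
proto_register rather than being shown here):

	/* The protocol's TIME_WAIT sock embeds the generic one first,
	 * so the inet_timewait_sock code can handle it uniformly and
	 * tcp_twsk() can downcast to the TCP-specific part. */
	struct tcp_timewait_sock {
		struct inet_timewait_sock tw_sk;
		u32	tw_rcv_nxt;
		u32	tw_snd_nxt;
		u32	tw_rcv_wnd;
		u32	tw_ts_recent;
		long	tw_ts_recent_stamp;
	};

	struct proto tcp_prot = {
		.name		= "TCP",
		/* ... the usual proto methods ... */
		.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	};

	/* proto_register() creates tcp_prot.twsk_slab from
	 * twsk_obj_size; tcp_time_wait() below then allocates from
	 * sk->sk_prot_creator->twsk_slab. */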
Next changesets will take advantage of this new infrastructure to
generalise even more TCP code.
[acme@toy net-2.6.14]$ grep built-in /tmp/before.size /tmp/after.size
/tmp/before.size: 188646 11764 5068 205478 322a6 net/ipv4/built-in.o
/tmp/after.size: 188144 11764 5068 204976 320b0 net/ipv4/built-in.o
[acme@toy net-2.6.14]$
Tested with both IPv4 & IPv6 (::1 (localhost) & ::ffff:172.20.0.1
(qemu host)).
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--  net/ipv4/tcp_minisocks.c | 142
1 file changed, 73 insertions(+), 69 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f29e2f6ebe1b..5b5a49335fbb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -41,7 +41,7 @@ int sysctl_tcp_max_tw_buckets = NR_FILE*2;
 int sysctl_tcp_syncookies = SYNC_INIT;
 int sysctl_tcp_abort_on_overflow;
 
-static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
+static void tcp_tw_schedule(struct inet_timewait_sock *tw, int timeo);
 
 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
@@ -58,7 +58,7 @@ int tcp_tw_count;
 
 
 /* Must be called with locally disabled BHs. */
-static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
+static void tcp_timewait_kill(struct inet_timewait_sock *tw)
 {
 	struct inet_bind_hashbucket *bhead;
 	struct inet_bind_bucket *tb;
@@ -85,11 +85,11 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
 
 #ifdef SOCK_REFCNT_DEBUG
 	if (atomic_read(&tw->tw_refcnt) != 1) {
-		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
-		       atomic_read(&tw->tw_refcnt));
+		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
+		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
 	}
 #endif
-	tcp_tw_put(tw);
+	inet_twsk_put(tw);
 }
 
 /*
@@ -121,19 +121,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
  * to avoid misread sequence numbers, states etc. --ANK
  */
 enum tcp_tw_status
-tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
-			   struct tcphdr *th, unsigned len)
+tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
+			   const struct tcphdr *th)
 {
+	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 	struct tcp_options_received tmp_opt;
 	int paws_reject = 0;
 
 	tmp_opt.saw_tstamp = 0;
-	if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
+	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
 		tcp_parse_options(skb, &tmp_opt, 0);
 
 		if (tmp_opt.saw_tstamp) {
-			tmp_opt.ts_recent = tw->tw_ts_recent;
-			tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+			tmp_opt.ts_recent = tcptw->tw_ts_recent;
+			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
 			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
 		}
 	}
@@ -144,20 +145,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
 	/* Out of window, send ACK */
 	if (paws_reject ||
 	    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-			   tw->tw_rcv_nxt,
-			   tw->tw_rcv_nxt + tw->tw_rcv_wnd))
+			   tcptw->tw_rcv_nxt,
+			   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
 		return TCP_TW_ACK;
 
 	if (th->rst)
 		goto kill;
 
-	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
+	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
 		goto kill_with_rst;
 
 	/* Dup ACK? */
-	if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
+	if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
 	    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
-		tcp_tw_put(tw);
+		inet_twsk_put(tw);
 		return TCP_TW_SUCCESS;
 	}
 
@@ -165,19 +166,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
 	 * reset.
 	 */
 	if (!th->fin ||
-	    TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
+	    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
 kill_with_rst:
 		tcp_tw_deschedule(tw);
-		tcp_tw_put(tw);
+		inet_twsk_put(tw);
 		return TCP_TW_RST;
 	}
 
 	/* FIN arrived, enter true time-wait state. */
 	tw->tw_substate = TCP_TIME_WAIT;
-	tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 	if (tmp_opt.saw_tstamp) {
-		tw->tw_ts_recent_stamp = xtime.tv_sec;
-		tw->tw_ts_recent = tmp_opt.rcv_tsval;
+		tcptw->tw_ts_recent_stamp = xtime.tv_sec;
+		tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
 	}
 
 	/* I am shamed, but failed to make it more elegant.
@@ -186,7 +187,7 @@ kill_with_rst:
 	 * do not undertsnad recycling in any case, it not
 	 * a big problem in practice. --ANK */
 	if (tw->tw_family == AF_INET &&
-	    sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
+	    sysctl_tcp_tw_recycle && tcptw->tw_ts_recent_stamp &&
 	    tcp_v4_tw_remember_stamp(tw))
 		tcp_tw_schedule(tw, tw->tw_timeout);
 	else
@@ -212,7 +213,7 @@ kill_with_rst:
 	 */
 
 	if (!paws_reject &&
-	    (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
+	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
 	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
 		/* In window segment, it may be only reset or bare ack. */
 
@@ -224,18 +225,18 @@ kill_with_rst:
 		if (sysctl_tcp_rfc1337 == 0) {
 kill:
 			tcp_tw_deschedule(tw);
-			tcp_tw_put(tw);
+			inet_twsk_put(tw);
 			return TCP_TW_SUCCESS;
 		}
 	}
 	tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
 
 	if (tmp_opt.saw_tstamp) {
-		tw->tw_ts_recent = tmp_opt.rcv_tsval;
-		tw->tw_ts_recent_stamp = xtime.tv_sec;
+		tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
+		tcptw->tw_ts_recent_stamp = xtime.tv_sec;
 	}
 
-	tcp_tw_put(tw);
+	inet_twsk_put(tw);
 	return TCP_TW_SUCCESS;
 }
 
@@ -257,9 +258,10 @@ kill:
 	 */
 
 	if (th->syn && !th->rst && !th->ack && !paws_reject &&
-	    (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
-	    (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
-		u32 isn = tw->tw_snd_nxt + 65535 + 2;
+	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
+	    (tmp_opt.saw_tstamp &&
+	     (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
+		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
 		if (isn == 0)
 			isn++;
 		TCP_SKB_CB(skb)->when = isn;
@@ -284,7 +286,7 @@ kill:
 		 */
 		return TCP_TW_ACK;
 	}
-	tcp_tw_put(tw);
+	inet_twsk_put(tw);
 	return TCP_TW_SUCCESS;
 }
 
@@ -293,7 +295,7 @@ kill:
  * relevant info into it from the SK, and mess with hash chains
  * and list linkage.
  */
-static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
+static void __tcp_tw_hashdance(struct sock *sk, struct inet_timewait_sock *tw)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 	struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
@@ -306,7 +308,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 	spin_lock(&bhead->lock);
 	tw->tw_tb = inet->bind_hash;
 	BUG_TRAP(inet->bind_hash);
-	tw_add_bind_node(tw, &tw->tw_tb->owners);
+	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
 
 	write_lock(&ehead->lock);
@@ -316,7 +318,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 	sock_prot_dec_use(sk->sk_prot);
 
 	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
-	tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
+	inet_twsk_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
 	atomic_inc(&tw->tw_refcnt);
 
 	write_unlock(&ehead->lock);
@@ -327,19 +329,23 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
  */
 void tcp_time_wait(struct sock *sk, int state, int timeo)
 {
-	struct tcp_tw_bucket *tw = NULL;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_timewait_sock *tw = NULL;
+	const struct tcp_sock *tp = tcp_sk(sk);
 	int recycle_ok = 0;
 
 	if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
 		recycle_ok = tp->af_specific->remember_stamp(sk);
 
 	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
-		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
+		tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab, SLAB_ATOMIC);
+
+	if (tw != NULL) {
+		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+		const struct inet_sock *inet = inet_sk(sk);
+		const int rto = (tp->rto << 2) - (tp->rto >> 1);
 
-	if(tw != NULL) {
-		struct inet_sock *inet = inet_sk(sk);
-		int rto = (tp->rto<<2) - (tp->rto>>1);
+		/* Remember our protocol */
+		tw->tw_prot = sk->sk_prot_creator;
 
 		/* Give us an identity. */
 		tw->tw_daddr = inet->daddr;
@@ -356,25 +362,23 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		atomic_set(&tw->tw_refcnt, 1);
 
 		tw->tw_hashent = sk->sk_hashent;
-		tw->tw_rcv_nxt = tp->rcv_nxt;
-		tw->tw_snd_nxt = tp->snd_nxt;
-		tw->tw_rcv_wnd = tcp_receive_window(tp);
-		tw->tw_ts_recent = tp->rx_opt.ts_recent;
-		tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
-		tw_dead_node_init(tw);
+		tcptw->tw_rcv_nxt = tp->rcv_nxt;
+		tcptw->tw_snd_nxt = tp->snd_nxt;
+		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
+		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
+		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+		inet_twsk_dead_node_init(tw);
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		if (tw->tw_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
+			struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
 
-			ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
-			ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
-			tw->tw_v6_ipv6only = np->ipv6only;
-		} else {
-			memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
-			memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
-			tw->tw_v6_ipv6only = 0;
-		}
+			ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
+			ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
+			tw->tw_ipv6only = np->ipv6only;
+		} else
+			tw->tw_ipv6only = 0;
 #endif
 		/* Linkage updates. */
 		__tcp_tw_hashdance(sk, tw);
@@ -392,7 +396,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		}
 
 		tcp_tw_schedule(tw, timeo);
-		tcp_tw_put(tw);
+		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
 		 * socket up. We've got bigger problems than
@@ -427,7 +431,7 @@ static u32 twkill_thread_slots;
 /* Returns non-zero if quota exceeded. */
 static int tcp_do_twkill_work(int slot, unsigned int quota)
 {
-	struct tcp_tw_bucket *tw;
+	struct inet_timewait_sock *tw;
 	struct hlist_node *node;
 	unsigned int killed;
 	int ret;
@@ -441,11 +445,11 @@ static int tcp_do_twkill_work(int slot, unsigned int quota)
 	killed = 0;
 	ret = 0;
 rescan:
-	tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
-		__tw_del_dead_node(tw);
+	inet_twsk_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
+		__inet_twsk_del_dead_node(tw);
 		spin_unlock(&tw_death_lock);
 		tcp_timewait_kill(tw);
-		tcp_tw_put(tw);
+		inet_twsk_put(tw);
 		killed++;
 		spin_lock(&tw_death_lock);
 		if (killed > quota) {
@@ -531,11 +535,11 @@ static void twkill_work(void *dummy)
  */
 
 /* This is for handling early-kills of TIME_WAIT sockets. */
-void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
+void tcp_tw_deschedule(struct inet_timewait_sock *tw)
 {
 	spin_lock(&tw_death_lock);
-	if (tw_del_dead_node(tw)) {
-		tcp_tw_put(tw);
+	if (inet_twsk_del_dead_node(tw)) {
+		inet_twsk_put(tw);
 		if (--tcp_tw_count == 0)
 			del_timer(&tcp_tw_timer);
 	}
@@ -552,7 +556,7 @@ static struct timer_list tcp_twcal_timer =
 		TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
 
-static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
+static void tcp_tw_schedule(struct inet_timewait_sock *tw, const int timeo)
 {
 	struct hlist_head *list;
 	int slot;
@@ -586,7 +590,7 @@ static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
 	spin_lock(&tw_death_lock);
 
 	/* Unlink it, if it was scheduled */
-	if (tw_del_dead_node(tw))
+	if (inet_twsk_del_dead_node(tw))
 		tcp_tw_count--;
 	else
 		atomic_inc(&tw->tw_refcnt);
@@ -644,13 +648,13 @@ void tcp_twcal_tick(unsigned long dummy)
 	for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
 		if (time_before_eq(j, now)) {
 			struct hlist_node *node, *safe;
-			struct tcp_tw_bucket *tw;
+			struct inet_timewait_sock *tw;
 
-			tw_for_each_inmate_safe(tw, node, safe,
-					&tcp_twcal_row[slot]) {
-				__tw_del_dead_node(tw);
+			inet_twsk_for_each_inmate_safe(tw, node, safe,
+						       &tcp_twcal_row[slot]) {
+				__inet_twsk_del_dead_node(tw);
 				tcp_timewait_kill(tw);
-				tcp_tw_put(tw);
+				inet_twsk_put(tw);
 				killed++;
 			}
 		} else {