author		Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-08-09 23:45:03 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:55:58 -0400
commit		696ab2d3bffc746fb8cf3712f066d42b9886aeed
tree		d0990b1d6f5fd6b3b7ddce553a16cccf6f029651 /net/ipv4/inet_timewait_sock.c
parent		295ff7edb8f72b77d524759266f7524deae379b3
[TIMEWAIT]: Move inet_timewait_death_row routines to net/ipv4/inet_timewait_sock.c
Also export the ones that will be used in the next changeset, when
DCCP uses this infrastructure.
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
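[Editorial sketch, not part of the commit: the exported routines form a complete TIME_WAIT management API, and the commit message says DCCP will consume it next. Below is a minimal, hypothetical illustration of how a protocol could wire it up, modeled on TCP's tcp_death_row of the same era. Every dccp_ and DCCP_ identifier here is an illustrative assumption, not necessarily what the follow-up DCCP changeset actually adds.]

#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>

#define DCCP_TIMEWAIT_LEN (60 * HZ)	/* assumed TIME_WAIT period */
#define DCCP_TIME_WAIT	  10		/* assumed protocol state value */

extern struct inet_hashinfo dccp_hashinfo;	/* the protocol's hash tables */

struct inet_timewait_death_row dccp_death_row = {
	.period		= DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= SPIN_LOCK_UNLOCKED,
	.hashinfo	= &dccp_hashinfo,
	/* Slow timer: inet_twdr_hangman() sweeps one of the
	 * INET_TWDR_TWKILL_SLOTS cells per period and defers to a
	 * work queue when the per-run quota is exceeded. */
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&dccp_death_row),
	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &dccp_death_row),
	/* Short-timeout "recycle" calendar starts out idle. */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&dccp_death_row),
};

/* Entering TIME_WAIT: swap the full socket for a tw bucket in the
 * hash tables, then put the bucket on the death row. */
static void dccp_time_wait_sketch(struct sock *sk, int timeo)
{
	struct inet_timewait_sock *tw = inet_twsk_alloc(sk, DCCP_TIME_WAIT);

	if (tw != NULL) {
		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
		inet_twsk_schedule(tw, &dccp_death_row, timeo,
				   DCCP_TIMEWAIT_LEN);
		/* Release our reference; the hash tables and the
		 * death row hold their own. */
		inet_twsk_put(tw);
	}
}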
Diffstat (limited to 'net/ipv4/inet_timewait_sock.c')

-rw-r--r--	net/ipv4/inet_timewait_sock.c	270
1 file changed, 270 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 22882d95f646..4d1502a49852 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -12,6 +12,7 @@
 
 #include <net/inet_hashtables.h>
 #include <net/inet_timewait_sock.h>
+#include <net/ip.h>
 
 /* Must be called with locally disabled BHs. */
 void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo)
@@ -85,6 +86,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	write_unlock(&ehead->lock);
 }
 
+EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
+
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
 {
 	struct inet_timewait_sock *tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab,
@@ -112,3 +115,270 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 
 	return tw;
 }
+
+EXPORT_SYMBOL_GPL(inet_twsk_alloc);
+
+/* Returns non-zero if quota exceeded. */
+static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
+				    const int slot)
+{
+	struct inet_timewait_sock *tw;
+	struct hlist_node *node;
+	unsigned int killed;
+	int ret;
+
+	/* NOTE: compare this to previous version where lock
+	 * was released after detaching chain. It was racy,
+	 * because tw buckets are scheduled in not serialized context
+	 * in 2.3 (with netfilter), and with softnet it is common, because
+	 * soft irqs are not sequenced.
+	 */
+	killed = 0;
+	ret = 0;
+rescan:
+	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
+		__inet_twsk_del_dead_node(tw);
+		spin_unlock(&twdr->death_lock);
+		__inet_twsk_kill(tw, twdr->hashinfo);
+		inet_twsk_put(tw);
+		killed++;
+		spin_lock(&twdr->death_lock);
+		if (killed > INET_TWDR_TWKILL_QUOTA) {
+			ret = 1;
+			break;
+		}
+
+		/* While we dropped twdr->death_lock, another cpu may have
+		 * killed off the next TW bucket in the list, therefore
+		 * do a fresh re-read of the hlist head node with the
+		 * lock reacquired. We still use the hlist traversal
+		 * macro in order to get the prefetches.
+		 */
+		goto rescan;
+	}
+
+	twdr->tw_count -= killed;
+	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
+
+	return ret;
+}
+
+void inet_twdr_hangman(unsigned long data)
+{
+	struct inet_timewait_death_row *twdr;
+	int unsigned need_timer;
+
+	twdr = (struct inet_timewait_death_row *)data;
+	spin_lock(&twdr->death_lock);
+
+	if (twdr->tw_count == 0)
+		goto out;
+
+	need_timer = 0;
+	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
+		twdr->thread_slots |= (1 << twdr->slot);
+		mb();
+		schedule_work(&twdr->twkill_work);
+		need_timer = 1;
+	} else {
+		/* We purged the entire slot, anything left? */
+		if (twdr->tw_count)
+			need_timer = 1;
+	}
+	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
+	if (need_timer)
+		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
+out:
+	spin_unlock(&twdr->death_lock);
+}
+
+EXPORT_SYMBOL_GPL(inet_twdr_hangman);
+
+extern void twkill_slots_invalid(void);
+
+void inet_twdr_twkill_work(void *data)
+{
+	struct inet_timewait_death_row *twdr = data;
+	int i;
+
+	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
+		twkill_slots_invalid();
+
+	while (twdr->thread_slots) {
+		spin_lock_bh(&twdr->death_lock);
+		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
+			if (!(twdr->thread_slots & (1 << i)))
+				continue;
+
+			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
+				if (need_resched()) {
+					spin_unlock_bh(&twdr->death_lock);
+					schedule();
+					spin_lock_bh(&twdr->death_lock);
+				}
+			}
+
+			twdr->thread_slots &= ~(1 << i);
+		}
+		spin_unlock_bh(&twdr->death_lock);
+	}
+}
+
+EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
+
+/* These are always called from BH context. See callers in
+ * tcp_input.c to verify this.
+ */
+
+/* This is for handling early-kills of TIME_WAIT sockets. */
+void inet_twsk_deschedule(struct inet_timewait_sock *tw,
+			  struct inet_timewait_death_row *twdr)
+{
+	spin_lock(&twdr->death_lock);
+	if (inet_twsk_del_dead_node(tw)) {
+		inet_twsk_put(tw);
+		if (--twdr->tw_count == 0)
+			del_timer(&twdr->tw_timer);
+	}
+	spin_unlock(&twdr->death_lock);
+	__inet_twsk_kill(tw, twdr->hashinfo);
+}
+
+EXPORT_SYMBOL(inet_twsk_deschedule);
+
+void inet_twsk_schedule(struct inet_timewait_sock *tw,
+			struct inet_timewait_death_row *twdr,
+			const int timeo, const int timewait_len)
+{
+	struct hlist_head *list;
+	int slot;
+
+	/* timeout := RTO * 3.5
+	 *
+	 * 3.5 = 1+2+0.5 to wait for two retransmits.
+	 *
+	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
+	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
+	 * FINs (or previous seqments) are lost (probability of such event
+	 * is p^(N+1), where p is probability to lose single packet and
+	 * time to detect the loss is about RTO*(2^N - 1) with exponential
+	 * backoff). Normal timewait length is calculated so, that we
+	 * waited at least for one retransmitted FIN (maximal RTO is 120sec).
+	 * [ BTW Linux. following BSD, violates this requirement waiting
+	 *   only for 60sec, we should wait at least for 240 secs.
+	 *   Well, 240 consumes too much of resources 8)
+	 * ]
+	 * This interval is not reduced to catch old duplicate and
+	 * responces to our wandering segments living for two MSLs.
+	 * However, if we use PAWS to detect
+	 * old duplicates, we can reduce the interval to bounds required
+	 * by RTO, rather than MSL. So, if peer understands PAWS, we
+	 * kill tw bucket after 3.5*RTO (it is important that this number
+	 * is greater than TS tick!) and detect old duplicates with help
+	 * of PAWS.
+	 */
+	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
+
+	spin_lock(&twdr->death_lock);
+
+	/* Unlink it, if it was scheduled */
+	if (inet_twsk_del_dead_node(tw))
+		twdr->tw_count--;
+	else
+		atomic_inc(&tw->tw_refcnt);
+
+	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
+		/* Schedule to slow timer */
+		if (timeo >= timewait_len) {
+			slot = INET_TWDR_TWKILL_SLOTS - 1;
+		} else {
+			slot = (timeo + twdr->period - 1) / twdr->period;
+			if (slot >= INET_TWDR_TWKILL_SLOTS)
+				slot = INET_TWDR_TWKILL_SLOTS - 1;
+		}
+		tw->tw_ttd = jiffies + timeo;
+		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
+		list = &twdr->cells[slot];
+	} else {
+		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
+
+		if (twdr->twcal_hand < 0) {
+			twdr->twcal_hand = 0;
+			twdr->twcal_jiffie = jiffies;
+			twdr->twcal_timer.expires = twdr->twcal_jiffie +
+					      (slot << INET_TWDR_RECYCLE_TICK);
+			add_timer(&twdr->twcal_timer);
+		} else {
+			if (time_after(twdr->twcal_timer.expires,
+				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
+				mod_timer(&twdr->twcal_timer,
+					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
+			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
+		}
+		list = &twdr->twcal_row[slot];
+	}
+
+	hlist_add_head(&tw->tw_death_node, list);
+
+	if (twdr->tw_count++ == 0)
+		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
+	spin_unlock(&twdr->death_lock);
+}
+
+EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+
+void inet_twdr_twcal_tick(unsigned long data)
+{
+	struct inet_timewait_death_row *twdr;
+	int n, slot;
+	unsigned long j;
+	unsigned long now = jiffies;
+	int killed = 0;
+	int adv = 0;
+
+	twdr = (struct inet_timewait_death_row *)data;
+
+	spin_lock(&twdr->death_lock);
+	if (twdr->twcal_hand < 0)
+		goto out;
+
+	slot = twdr->twcal_hand;
+	j = twdr->twcal_jiffie;
+
+	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
+		if (time_before_eq(j, now)) {
+			struct hlist_node *node, *safe;
+			struct inet_timewait_sock *tw;
+
+			inet_twsk_for_each_inmate_safe(tw, node, safe,
+						       &twdr->twcal_row[slot]) {
+				__inet_twsk_del_dead_node(tw);
+				__inet_twsk_kill(tw, twdr->hashinfo);
+				inet_twsk_put(tw);
+				killed++;
+			}
+		} else {
+			if (!adv) {
+				adv = 1;
+				twdr->twcal_jiffie = j;
+				twdr->twcal_hand = slot;
+			}
+
+			if (!hlist_empty(&twdr->twcal_row[slot])) {
+				mod_timer(&twdr->twcal_timer, j);
+				goto out;
+			}
+		}
+		j += 1 << INET_TWDR_RECYCLE_TICK;
+		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
+	}
+	twdr->twcal_hand = -1;
+
+out:
+	if ((twdr->tw_count -= killed) == 0)
+		del_timer(&twdr->tw_timer);
+	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
+	spin_unlock(&twdr->death_lock);
+}
+
+EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
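[Editorial note: inet_twsk_schedule() above chooses between the two timer wheels by rounding the timeout up to whole recycle ticks with an add-then-shift, a divide-by-2^TICK with ceiling semantics; timeouts that round to INET_TWDR_RECYCLE_SLOTS or more go to the slow wheel. A standalone userspace sketch of that rounding follows; RECYCLE_TICK = 7 is an assumption for illustration (in the kernel INET_TWDR_RECYCLE_TICK is derived from HZ), and with HZ = 1000 one tick of 2^7 jiffies is about 128 ms.]

#include <stdio.h>

#define RECYCLE_TICK	7	/* assumed; the kernel derives this from HZ */
#define RECYCLE_SLOTS	32	/* INET_TWDR_RECYCLE_SLOTS = 1 << 5 */

int main(void)
{
	int timeo;

	for (timeo = 1; timeo <= 4200; timeo += 419) {
		/* Round timeo up to whole recycle ticks: adding
		 * (1 << TICK) - 1 before the right shift gives
		 * ceil(timeo / 2^TICK) without a division. */
		int slot = (timeo + (1 << RECYCLE_TICK) - 1) >> RECYCLE_TICK;

		printf("timeo=%4d jiffies -> slot %2d (%s wheel)\n",
		       timeo, slot,
		       slot >= RECYCLE_SLOTS ? "slow" : "recycle");
	}
	return 0;
}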
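[Editorial note: the odd-looking "extern void twkill_slots_invalid(void);" above is declared but never defined anywhere. It is a link-time assertion in the spirit of BUILD_BUG_ON: the guard in inet_twdr_twkill_work() is an arithmetic constant expression, so when the configuration is sane the compiler folds the branch away and the call vanishes; if the constants were ever wrong, the surviving call would break the build with an undefined reference. A minimal userspace demonstration, with all names invented for illustration; build with optimization enabled (e.g. gcc -O1) so the dead branch is reliably eliminated.]

/* Deliberately declared, never defined. */
extern void config_invalid(void);

#define SLOTS 8	/* must fit in the bits of an unsigned long bitmap */

int main(void)
{
	unsigned long bitmap = 0;

	/* Constant condition: false here, so the compiler removes the
	 * branch and the program links.  Raise SLOTS past the number of
	 * bits in bitmap and linking fails with an undefined reference
	 * to config_invalid(). */
	if ((SLOTS - 1) > (int)(sizeof(bitmap) * 8))
		config_invalid();

	return (int)bitmap;
}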