path: root/net/ipv4/tcp_minisocks.c
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--  net/ipv4/tcp_minisocks.c  605
1 file changed, 101 insertions(+), 504 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f42a284164b7..a88db28b0af7 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -35,13 +35,27 @@
35#define SYNC_INIT 1 35#define SYNC_INIT 1
36#endif 36#endif
37 37
38int sysctl_tcp_tw_recycle;
39int sysctl_tcp_max_tw_buckets = NR_FILE*2;
40
41int sysctl_tcp_syncookies = SYNC_INIT; 38int sysctl_tcp_syncookies = SYNC_INIT;
42int sysctl_tcp_abort_on_overflow; 39int sysctl_tcp_abort_on_overflow;
43 40
44static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo); 41struct inet_timewait_death_row tcp_death_row = {
42 .sysctl_max_tw_buckets = NR_FILE * 2,
43 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
44 .death_lock = SPIN_LOCK_UNLOCKED,
45 .hashinfo = &tcp_hashinfo,
46 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
47 (unsigned long)&tcp_death_row),
48 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
49 inet_twdr_twkill_work,
50 &tcp_death_row),
51/* Short-time timewait calendar */
52
53 .twcal_hand = -1,
54 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
55 (unsigned long)&tcp_death_row),
56};
57
58EXPORT_SYMBOL_GPL(tcp_death_row);
45 59
46static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 60static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
47{ 61{
@@ -52,47 +66,6 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
52 return (seq == e_win && seq == end_seq); 66 return (seq == e_win && seq == end_seq);
53} 67}
54 68
55/* New-style handling of TIME_WAIT sockets. */
56
57int tcp_tw_count;
58
59
60/* Must be called with locally disabled BHs. */
61static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
62{
63 struct tcp_ehash_bucket *ehead;
64 struct tcp_bind_hashbucket *bhead;
65 struct tcp_bind_bucket *tb;
66
67 /* Unlink from established hashes. */
68 ehead = &tcp_ehash[tw->tw_hashent];
69 write_lock(&ehead->lock);
70 if (hlist_unhashed(&tw->tw_node)) {
71 write_unlock(&ehead->lock);
72 return;
73 }
74 __hlist_del(&tw->tw_node);
75 sk_node_init(&tw->tw_node);
76 write_unlock(&ehead->lock);
77
78 /* Disassociate with bind bucket. */
79 bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
80 spin_lock(&bhead->lock);
81 tb = tw->tw_tb;
82 __hlist_del(&tw->tw_bind_node);
83 tw->tw_tb = NULL;
84 tcp_bucket_destroy(tb);
85 spin_unlock(&bhead->lock);
86
87#ifdef INET_REFCNT_DEBUG
88 if (atomic_read(&tw->tw_refcnt) != 1) {
89 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
90 atomic_read(&tw->tw_refcnt));
91 }
92#endif
93 tcp_tw_put(tw);
94}
95
96/* 69/*
97 * * Main purpose of TIME-WAIT state is to close connection gracefully, 70 * * Main purpose of TIME-WAIT state is to close connection gracefully,
98 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN 71 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -122,19 +95,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
122 * to avoid misread sequence numbers, states etc. --ANK 95 * to avoid misread sequence numbers, states etc. --ANK
123 */ 96 */
124enum tcp_tw_status 97enum tcp_tw_status
125tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, 98tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
126 struct tcphdr *th, unsigned len) 99 const struct tcphdr *th)
127{ 100{
101 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
128 struct tcp_options_received tmp_opt; 102 struct tcp_options_received tmp_opt;
129 int paws_reject = 0; 103 int paws_reject = 0;
130 104
131 tmp_opt.saw_tstamp = 0; 105 tmp_opt.saw_tstamp = 0;
132 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) { 106 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
133 tcp_parse_options(skb, &tmp_opt, 0); 107 tcp_parse_options(skb, &tmp_opt, 0);
134 108
135 if (tmp_opt.saw_tstamp) { 109 if (tmp_opt.saw_tstamp) {
136 tmp_opt.ts_recent = tw->tw_ts_recent; 110 tmp_opt.ts_recent = tcptw->tw_ts_recent;
137 tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; 111 tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138 paws_reject = tcp_paws_check(&tmp_opt, th->rst); 112 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
139 } 113 }
140 } 114 }
@@ -145,20 +119,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
145 /* Out of window, send ACK */ 119 /* Out of window, send ACK */
146 if (paws_reject || 120 if (paws_reject ||
147 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, 121 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
148 tw->tw_rcv_nxt, 122 tcptw->tw_rcv_nxt,
149 tw->tw_rcv_nxt + tw->tw_rcv_wnd)) 123 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
150 return TCP_TW_ACK; 124 return TCP_TW_ACK;
151 125
152 if (th->rst) 126 if (th->rst)
153 goto kill; 127 goto kill;
154 128
155 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt)) 129 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
156 goto kill_with_rst; 130 goto kill_with_rst;
157 131
158 /* Dup ACK? */ 132 /* Dup ACK? */
159 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) || 133 if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
160 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { 134 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
161 tcp_tw_put(tw); 135 inet_twsk_put(tw);
162 return TCP_TW_SUCCESS; 136 return TCP_TW_SUCCESS;
163 } 137 }
164 138
@@ -166,19 +140,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
166 * reset. 140 * reset.
167 */ 141 */
168 if (!th->fin || 142 if (!th->fin ||
169 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) { 143 TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
170kill_with_rst: 144kill_with_rst:
171 tcp_tw_deschedule(tw); 145 inet_twsk_deschedule(tw, &tcp_death_row);
172 tcp_tw_put(tw); 146 inet_twsk_put(tw);
173 return TCP_TW_RST; 147 return TCP_TW_RST;
174 } 148 }
175 149
176 /* FIN arrived, enter true time-wait state. */ 150 /* FIN arrived, enter true time-wait state. */
177 tw->tw_substate = TCP_TIME_WAIT; 151 tw->tw_substate = TCP_TIME_WAIT;
178 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; 152 tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
179 if (tmp_opt.saw_tstamp) { 153 if (tmp_opt.saw_tstamp) {
180 tw->tw_ts_recent_stamp = xtime.tv_sec; 154 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
181 tw->tw_ts_recent = tmp_opt.rcv_tsval; 155 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
182 } 156 }
183 157
184 /* I am shamed, but failed to make it more elegant. 158 /* I am shamed, but failed to make it more elegant.
@@ -187,11 +161,13 @@ kill_with_rst:
187 * do not understand recycling in any case, it is not 161 * do not understand recycling in any case, it is not
188 * a big problem in practice. --ANK */ 162 * a big problem in practice. --ANK */
189 if (tw->tw_family == AF_INET && 163 if (tw->tw_family == AF_INET &&
190 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp && 164 tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
191 tcp_v4_tw_remember_stamp(tw)) 165 tcp_v4_tw_remember_stamp(tw))
192 tcp_tw_schedule(tw, tw->tw_timeout); 166 inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
167 TCP_TIMEWAIT_LEN);
193 else 168 else
194 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 169 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
170 TCP_TIMEWAIT_LEN);
195 return TCP_TW_ACK; 171 return TCP_TW_ACK;
196 } 172 }
197 173
@@ -213,7 +189,7 @@ kill_with_rst:
213 */ 189 */
214 190
215 if (!paws_reject && 191 if (!paws_reject &&
216 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt && 192 (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
217 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { 193 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
218 /* In window segment, it may be only reset or bare ack. */ 194 /* In window segment, it may be only reset or bare ack. */
219 195
@@ -224,19 +200,20 @@ kill_with_rst:
224 */ 200 */
225 if (sysctl_tcp_rfc1337 == 0) { 201 if (sysctl_tcp_rfc1337 == 0) {
226kill: 202kill:
227 tcp_tw_deschedule(tw); 203 inet_twsk_deschedule(tw, &tcp_death_row);
228 tcp_tw_put(tw); 204 inet_twsk_put(tw);
229 return TCP_TW_SUCCESS; 205 return TCP_TW_SUCCESS;
230 } 206 }
231 } 207 }
232 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 208 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
209 TCP_TIMEWAIT_LEN);
233 210
234 if (tmp_opt.saw_tstamp) { 211 if (tmp_opt.saw_tstamp) {
235 tw->tw_ts_recent = tmp_opt.rcv_tsval; 212 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
236 tw->tw_ts_recent_stamp = xtime.tv_sec; 213 tcptw->tw_ts_recent_stamp = xtime.tv_sec;
237 } 214 }
238 215
239 tcp_tw_put(tw); 216 inet_twsk_put(tw);
240 return TCP_TW_SUCCESS; 217 return TCP_TW_SUCCESS;
241 } 218 }
242 219
@@ -258,9 +235,10 @@ kill:
258 */ 235 */
259 236
260 if (th->syn && !th->rst && !th->ack && !paws_reject && 237 if (th->syn && !th->rst && !th->ack && !paws_reject &&
261 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) || 238 (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
262 (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) { 239 (tmp_opt.saw_tstamp &&
263 u32 isn = tw->tw_snd_nxt + 65535 + 2; 240 (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
241 u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
264 if (isn == 0) 242 if (isn == 0)
265 isn++; 243 isn++;
266 TCP_SKB_CB(skb)->when = isn; 244 TCP_SKB_CB(skb)->when = isn;
@@ -278,107 +256,57 @@ kill:
278 * Do not reschedule in the last case. 256 * Do not reschedule in the last case.
279 */ 257 */
280 if (paws_reject || th->ack) 258 if (paws_reject || th->ack)
281 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); 259 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
260 TCP_TIMEWAIT_LEN);
282 261
283 /* Send ACK. Note, we do not put the bucket, 262 /* Send ACK. Note, we do not put the bucket,
284 * it will be released by caller. 263 * it will be released by caller.
285 */ 264 */
286 return TCP_TW_ACK; 265 return TCP_TW_ACK;
287 } 266 }
288 tcp_tw_put(tw); 267 inet_twsk_put(tw);
289 return TCP_TW_SUCCESS; 268 return TCP_TW_SUCCESS;
290} 269}
291 270
292/* Enter the time wait state. This is called with locally disabled BH.
293 * Essentially we whip up a timewait bucket, copy the
294 * relevant info into it from the SK, and mess with hash chains
295 * and list linkage.
296 */
297static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
298{
299 struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
300 struct tcp_bind_hashbucket *bhead;
301
302 /* Step 1: Put TW into bind hash. Original socket stays there too.
303 Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
304 binding cache, even if it is closed.
305 */
306 bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
307 spin_lock(&bhead->lock);
308 tw->tw_tb = tcp_sk(sk)->bind_hash;
309 BUG_TRAP(tcp_sk(sk)->bind_hash);
310 tw_add_bind_node(tw, &tw->tw_tb->owners);
311 spin_unlock(&bhead->lock);
312
313 write_lock(&ehead->lock);
314
315 /* Step 2: Remove SK from established hash. */
316 if (__sk_del_node_init(sk))
317 sock_prot_dec_use(sk->sk_prot);
318
319 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
320 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
321 atomic_inc(&tw->tw_refcnt);
322
323 write_unlock(&ehead->lock);
324}
325
326/* 271/*
327 * Move a socket to time-wait or dead fin-wait-2 state. 272 * Move a socket to time-wait or dead fin-wait-2 state.
328 */ 273 */
329void tcp_time_wait(struct sock *sk, int state, int timeo) 274void tcp_time_wait(struct sock *sk, int state, int timeo)
330{ 275{
331 struct tcp_tw_bucket *tw = NULL; 276 struct inet_timewait_sock *tw = NULL;
332 struct tcp_sock *tp = tcp_sk(sk); 277 const struct tcp_sock *tp = tcp_sk(sk);
333 int recycle_ok = 0; 278 int recycle_ok = 0;
334 279
335 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp) 280 if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
336 recycle_ok = tp->af_specific->remember_stamp(sk); 281 recycle_ok = tp->af_specific->remember_stamp(sk);
337 282
338 if (tcp_tw_count < sysctl_tcp_max_tw_buckets) 283 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
339 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC); 284 tw = inet_twsk_alloc(sk, state);
340
341 if(tw != NULL) {
342 struct inet_sock *inet = inet_sk(sk);
343 int rto = (tp->rto<<2) - (tp->rto>>1);
344
345 /* Give us an identity. */
346 tw->tw_daddr = inet->daddr;
347 tw->tw_rcv_saddr = inet->rcv_saddr;
348 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
349 tw->tw_num = inet->num;
350 tw->tw_state = TCP_TIME_WAIT;
351 tw->tw_substate = state;
352 tw->tw_sport = inet->sport;
353 tw->tw_dport = inet->dport;
354 tw->tw_family = sk->sk_family;
355 tw->tw_reuse = sk->sk_reuse;
356 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
357 atomic_set(&tw->tw_refcnt, 1);
358 285
359 tw->tw_hashent = sk->sk_hashent; 286 if (tw != NULL) {
360 tw->tw_rcv_nxt = tp->rcv_nxt; 287 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
361 tw->tw_snd_nxt = tp->snd_nxt; 288 const struct inet_connection_sock *icsk = inet_csk(sk);
362 tw->tw_rcv_wnd = tcp_receive_window(tp); 289 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
363 tw->tw_ts_recent = tp->rx_opt.ts_recent; 290
364 tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; 291 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
365 tw_dead_node_init(tw); 292 tcptw->tw_rcv_nxt = tp->rcv_nxt;
293 tcptw->tw_snd_nxt = tp->snd_nxt;
294 tcptw->tw_rcv_wnd = tcp_receive_window(tp);
295 tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
296 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
366 297
367#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 298#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
368 if (tw->tw_family == PF_INET6) { 299 if (tw->tw_family == PF_INET6) {
369 struct ipv6_pinfo *np = inet6_sk(sk); 300 struct ipv6_pinfo *np = inet6_sk(sk);
301 struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
370 302
371 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr); 303 ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
372 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr); 304 ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
373 tw->tw_v6_ipv6only = np->ipv6only; 305 tw->tw_ipv6only = np->ipv6only;
374 } else {
375 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
376 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
377 tw->tw_v6_ipv6only = 0;
378 } 306 }
379#endif 307#endif
380 /* Linkage updates. */ 308 /* Linkage updates. */
381 __tcp_tw_hashdance(sk, tw); 309 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
382 310
383 /* Get the TIME_WAIT timeout firing. */ 311 /* Get the TIME_WAIT timeout firing. */
384 if (timeo < rto) 312 if (timeo < rto)
@@ -392,8 +320,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
392 timeo = TCP_TIMEWAIT_LEN; 320 timeo = TCP_TIMEWAIT_LEN;
393 } 321 }
394 322
395 tcp_tw_schedule(tw, timeo); 323 inet_twsk_schedule(tw, &tcp_death_row, timeo,
396 tcp_tw_put(tw); 324 TCP_TIMEWAIT_LEN);
325 inet_twsk_put(tw);
397 } else { 326 } else {
398 /* Sorry, if we're out of memory, just CLOSE this 327 /* Sorry, if we're out of memory, just CLOSE this
399 * socket up. We've got bigger problems than 328 * socket up. We've got bigger problems than
@@ -407,277 +336,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
407 tcp_done(sk); 336 tcp_done(sk);
408} 337}
409 338
410/* Kill off TIME_WAIT sockets once their lifetime has expired. */
411static int tcp_tw_death_row_slot;
412
413static void tcp_twkill(unsigned long);
414
415/* TIME_WAIT reaping mechanism. */
416#define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
417#define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
418
419#define TCP_TWKILL_QUOTA 100
420
421static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
422static DEFINE_SPINLOCK(tw_death_lock);
423static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
424static void twkill_work(void *);
425static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
426static u32 twkill_thread_slots;
427
428/* Returns non-zero if quota exceeded. */
429static int tcp_do_twkill_work(int slot, unsigned int quota)
430{
431 struct tcp_tw_bucket *tw;
432 struct hlist_node *node;
433 unsigned int killed;
434 int ret;
435
436 /* NOTE: compare this to previous version where lock
437 * was released after detaching chain. It was racy,
438 * because tw buckets are scheduled in not serialized context
439 * in 2.3 (with netfilter), and with softnet it is common, because
440 * soft irqs are not sequenced.
441 */
442 killed = 0;
443 ret = 0;
444rescan:
445 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
446 __tw_del_dead_node(tw);
447 spin_unlock(&tw_death_lock);
448 tcp_timewait_kill(tw);
449 tcp_tw_put(tw);
450 killed++;
451 spin_lock(&tw_death_lock);
452 if (killed > quota) {
453 ret = 1;
454 break;
455 }
456
457 /* While we dropped tw_death_lock, another cpu may have
458 * killed off the next TW bucket in the list, therefore
459 * do a fresh re-read of the hlist head node with the
460 * lock reacquired. We still use the hlist traversal
461 * macro in order to get the prefetches.
462 */
463 goto rescan;
464 }
465
466 tcp_tw_count -= killed;
467 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
468
469 return ret;
470}
471
472static void tcp_twkill(unsigned long dummy)
473{
474 int need_timer, ret;
475
476 spin_lock(&tw_death_lock);
477
478 if (tcp_tw_count == 0)
479 goto out;
480
481 need_timer = 0;
482 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
483 if (ret) {
484 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
485 mb();
486 schedule_work(&tcp_twkill_work);
487 need_timer = 1;
488 } else {
489 /* We purged the entire slot, anything left? */
490 if (tcp_tw_count)
491 need_timer = 1;
492 }
493 tcp_tw_death_row_slot =
494 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
495 if (need_timer)
496 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
497out:
498 spin_unlock(&tw_death_lock);
499}
500
501extern void twkill_slots_invalid(void);
502
503static void twkill_work(void *dummy)
504{
505 int i;
506
507 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
508 twkill_slots_invalid();
509
510 while (twkill_thread_slots) {
511 spin_lock_bh(&tw_death_lock);
512 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
513 if (!(twkill_thread_slots & (1 << i)))
514 continue;
515
516 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
517 if (need_resched()) {
518 spin_unlock_bh(&tw_death_lock);
519 schedule();
520 spin_lock_bh(&tw_death_lock);
521 }
522 }
523
524 twkill_thread_slots &= ~(1 << i);
525 }
526 spin_unlock_bh(&tw_death_lock);
527 }
528}
529
530/* These are always called from BH context. See callers in
531 * tcp_input.c to verify this.
532 */
533
534/* This is for handling early-kills of TIME_WAIT sockets. */
535void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
536{
537 spin_lock(&tw_death_lock);
538 if (tw_del_dead_node(tw)) {
539 tcp_tw_put(tw);
540 if (--tcp_tw_count == 0)
541 del_timer(&tcp_tw_timer);
542 }
543 spin_unlock(&tw_death_lock);
544 tcp_timewait_kill(tw);
545}
546
547/* Short-time timewait calendar */
548
549static int tcp_twcal_hand = -1;
550static int tcp_twcal_jiffie;
551static void tcp_twcal_tick(unsigned long);
552static struct timer_list tcp_twcal_timer =
553 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
554static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
555
556static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
557{
558 struct hlist_head *list;
559 int slot;
560
561 /* timeout := RTO * 3.5
562 *
563 * 3.5 = 1+2+0.5 to wait for two retransmits.
564 *
565 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
566 * our ACK acking that FIN can be lost. If N subsequent retransmitted
567 * FINs (or previous segments) are lost (probability of such an event
568 * is p^(N+1), where p is probability to lose single packet and
569 * time to detect the loss is about RTO*(2^N - 1) with exponential
570 * backoff). Normal timewait length is calculated so, that we
571 * waited at least for one retransmitted FIN (maximal RTO is 120sec).
572 * [ BTW Linux. following BSD, violates this requirement waiting
573 * only for 60sec, we should wait at least for 240 secs.
574 * Well, 240 consumes too much of resources 8)
575 * ]
576 * This interval is not reduced to catch old duplicate and
577 * responces to our wandering segments living for two MSLs.
578 * However, if we use PAWS to detect
579 * old duplicates, we can reduce the interval to bounds required
580 * by RTO, rather than MSL. So, if peer understands PAWS, we
581 * kill tw bucket after 3.5*RTO (it is important that this number
582 * is greater than TS tick!) and detect old duplicates with help
583 * of PAWS.
584 */
585 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
586
587 spin_lock(&tw_death_lock);
588
589 /* Unlink it, if it was scheduled */
590 if (tw_del_dead_node(tw))
591 tcp_tw_count--;
592 else
593 atomic_inc(&tw->tw_refcnt);
594
595 if (slot >= TCP_TW_RECYCLE_SLOTS) {
596 /* Schedule to slow timer */
597 if (timeo >= TCP_TIMEWAIT_LEN) {
598 slot = TCP_TWKILL_SLOTS-1;
599 } else {
600 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
601 if (slot >= TCP_TWKILL_SLOTS)
602 slot = TCP_TWKILL_SLOTS-1;
603 }
604 tw->tw_ttd = jiffies + timeo;
605 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
606 list = &tcp_tw_death_row[slot];
607 } else {
608 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
609
610 if (tcp_twcal_hand < 0) {
611 tcp_twcal_hand = 0;
612 tcp_twcal_jiffie = jiffies;
613 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
614 add_timer(&tcp_twcal_timer);
615 } else {
616 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
617 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
618 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
619 }
620 list = &tcp_twcal_row[slot];
621 }
622
623 hlist_add_head(&tw->tw_death_node, list);
624
625 if (tcp_tw_count++ == 0)
626 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
627 spin_unlock(&tw_death_lock);
628}
629
630void tcp_twcal_tick(unsigned long dummy)
631{
632 int n, slot;
633 unsigned long j;
634 unsigned long now = jiffies;
635 int killed = 0;
636 int adv = 0;
637
638 spin_lock(&tw_death_lock);
639 if (tcp_twcal_hand < 0)
640 goto out;
641
642 slot = tcp_twcal_hand;
643 j = tcp_twcal_jiffie;
644
645 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
646 if (time_before_eq(j, now)) {
647 struct hlist_node *node, *safe;
648 struct tcp_tw_bucket *tw;
649
650 tw_for_each_inmate_safe(tw, node, safe,
651 &tcp_twcal_row[slot]) {
652 __tw_del_dead_node(tw);
653 tcp_timewait_kill(tw);
654 tcp_tw_put(tw);
655 killed++;
656 }
657 } else {
658 if (!adv) {
659 adv = 1;
660 tcp_twcal_jiffie = j;
661 tcp_twcal_hand = slot;
662 }
663
664 if (!hlist_empty(&tcp_twcal_row[slot])) {
665 mod_timer(&tcp_twcal_timer, j);
666 goto out;
667 }
668 }
669 j += (1<<TCP_TW_RECYCLE_TICK);
670 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
671 }
672 tcp_twcal_hand = -1;
673
674out:
675 if ((tcp_tw_count -= killed) == 0)
676 del_timer(&tcp_tw_timer);
677 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
678 spin_unlock(&tw_death_lock);
679}
680
681/* This is not only more efficient than what we used to do, it eliminates 339/* This is not only more efficient than what we used to do, it eliminates
682 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM 340 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
683 * 341 *
@@ -686,75 +344,27 @@ out:
686 */ 344 */
687struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) 345struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
688{ 346{
689 /* allocate the newsk from the same slab of the master sock, 347 struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
690 * if not, at sk_free time we'll try to free it from the wrong
691 * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */
692 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
693 348
694 if(newsk != NULL) { 349 if (newsk != NULL) {
695 struct inet_request_sock *ireq = inet_rsk(req); 350 const struct inet_request_sock *ireq = inet_rsk(req);
696 struct tcp_request_sock *treq = tcp_rsk(req); 351 struct tcp_request_sock *treq = tcp_rsk(req);
352 struct inet_connection_sock *newicsk = inet_csk(sk);
697 struct tcp_sock *newtp; 353 struct tcp_sock *newtp;
698 struct sk_filter *filter;
699
700 memcpy(newsk, sk, sizeof(struct tcp_sock));
701 newsk->sk_state = TCP_SYN_RECV;
702
703 /* SANITY */
704 sk_node_init(&newsk->sk_node);
705 tcp_sk(newsk)->bind_hash = NULL;
706
707 /* Clone the TCP header template */
708 inet_sk(newsk)->dport = ireq->rmt_port;
709
710 sock_lock_init(newsk);
711 bh_lock_sock(newsk);
712
713 rwlock_init(&newsk->sk_dst_lock);
714 atomic_set(&newsk->sk_rmem_alloc, 0);
715 skb_queue_head_init(&newsk->sk_receive_queue);
716 atomic_set(&newsk->sk_wmem_alloc, 0);
717 skb_queue_head_init(&newsk->sk_write_queue);
718 atomic_set(&newsk->sk_omem_alloc, 0);
719 newsk->sk_wmem_queued = 0;
720 newsk->sk_forward_alloc = 0;
721
722 sock_reset_flag(newsk, SOCK_DONE);
723 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
724 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
725 newsk->sk_send_head = NULL;
726 rwlock_init(&newsk->sk_callback_lock);
727 skb_queue_head_init(&newsk->sk_error_queue);
728 newsk->sk_write_space = sk_stream_write_space;
729
730 if ((filter = newsk->sk_filter) != NULL)
731 sk_filter_charge(newsk, filter);
732
733 if (unlikely(xfrm_sk_clone_policy(newsk))) {
734 /* It is still raw copy of parent, so invalidate
735 * destructor and make plain sk_free() */
736 newsk->sk_destruct = NULL;
737 sk_free(newsk);
738 return NULL;
739 }
740 354
741 /* Now setup tcp_sock */ 355 /* Now setup tcp_sock */
742 newtp = tcp_sk(newsk); 356 newtp = tcp_sk(newsk);
743 newtp->pred_flags = 0; 357 newtp->pred_flags = 0;
744 newtp->rcv_nxt = treq->rcv_isn + 1; 358 newtp->rcv_nxt = treq->rcv_isn + 1;
745 newtp->snd_nxt = treq->snt_isn + 1; 359 newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
746 newtp->snd_una = treq->snt_isn + 1;
747 newtp->snd_sml = treq->snt_isn + 1;
748 360
749 tcp_prequeue_init(newtp); 361 tcp_prequeue_init(newtp);
750 362
751 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); 363 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
752 364
753 newtp->retransmits = 0;
754 newtp->backoff = 0;
755 newtp->srtt = 0; 365 newtp->srtt = 0;
756 newtp->mdev = TCP_TIMEOUT_INIT; 366 newtp->mdev = TCP_TIMEOUT_INIT;
757 newtp->rto = TCP_TIMEOUT_INIT; 367 newicsk->icsk_rto = TCP_TIMEOUT_INIT;
758 368
759 newtp->packets_out = 0; 369 newtp->packets_out = 0;
760 newtp->left_out = 0; 370 newtp->left_out = 0;
@@ -774,9 +384,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
774 newtp->frto_counter = 0; 384 newtp->frto_counter = 0;
775 newtp->frto_highmark = 0; 385 newtp->frto_highmark = 0;
776 386
777 newtp->ca_ops = &tcp_reno; 387 newicsk->icsk_ca_ops = &tcp_reno;
778 388
779 tcp_set_ca_state(newtp, TCP_CA_Open); 389 tcp_set_ca_state(newsk, TCP_CA_Open);
780 tcp_init_xmit_timers(newsk); 390 tcp_init_xmit_timers(newsk);
781 skb_queue_head_init(&newtp->out_of_order_queue); 391 skb_queue_head_init(&newtp->out_of_order_queue);
782 newtp->rcv_wup = treq->rcv_isn + 1; 392 newtp->rcv_wup = treq->rcv_isn + 1;
@@ -789,26 +399,12 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
789 newtp->rx_opt.dsack = 0; 399 newtp->rx_opt.dsack = 0;
790 newtp->rx_opt.eff_sacks = 0; 400 newtp->rx_opt.eff_sacks = 0;
791 401
792 newtp->probes_out = 0;
793 newtp->rx_opt.num_sacks = 0; 402 newtp->rx_opt.num_sacks = 0;
794 newtp->urg_data = 0; 403 newtp->urg_data = 0;
795 /* Deinitialize accept_queue to trap illegal accesses. */
796 memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
797
798 /* Back to base struct sock members. */
799 newsk->sk_err = 0;
800 newsk->sk_priority = 0;
801 atomic_set(&newsk->sk_refcnt, 2);
802#ifdef INET_REFCNT_DEBUG
803 atomic_inc(&inet_sock_nr);
804#endif
805 atomic_inc(&tcp_sockets_allocated);
806 404
807 if (sock_flag(newsk, SOCK_KEEPOPEN)) 405 if (sock_flag(newsk, SOCK_KEEPOPEN))
808 tcp_reset_keepalive_timer(newsk, 406 inet_csk_reset_keepalive_timer(newsk,
809 keepalive_time_when(newtp)); 407 keepalive_time_when(newtp));
810 newsk->sk_socket = NULL;
811 newsk->sk_sleep = NULL;
812 408
813 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; 409 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
814 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { 410 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
@@ -838,7 +434,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
838 newtp->tcp_header_len = sizeof(struct tcphdr); 434 newtp->tcp_header_len = sizeof(struct tcphdr);
839 } 435 }
840 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) 436 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
841 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len; 437 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
842 newtp->rx_opt.mss_clamp = req->mss; 438 newtp->rx_opt.mss_clamp = req->mss;
843 TCP_ECN_openreq_child(newtp, req); 439 TCP_ECN_openreq_child(newtp, req);
844 if (newtp->ecn_flags&TCP_ECN_OK) 440 if (newtp->ecn_flags&TCP_ECN_OK)
@@ -934,9 +530,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
934 does sequence test, SYN is truncated, and thus we consider 530 does sequence test, SYN is truncated, and thus we consider
935 it a bare ACK. 531 it a bare ACK.
936 532
937 If tp->defer_accept, we silently drop this bare ACK. Otherwise, 533 If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
938 we create an established connection. Both ends (listening sockets) 534 bare ACK. Otherwise, we create an established connection. Both
939 accept the new incoming connection and try to talk to each other. 8-) 535 ends (listening sockets) accept the new incoming connection and try
536 to talk to each other. 8-)
940 537
941 Note: This case is both harmless, and rare. Possibility is about the 538 Note: This case is both harmless, and rare. Possibility is about the
942 same as us discovering intelligent life on another plant tomorrow. 539 same as us discovering intelligent life on another plant tomorrow.
@@ -1003,7 +600,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
1003 return NULL; 600 return NULL;
1004 601
1005 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ 602 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1006 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 603 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
604 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
1007 inet_rsk(req)->acked = 1; 605 inet_rsk(req)->acked = 1;
1008 return NULL; 606 return NULL;
1009 } 607 }
@@ -1018,10 +616,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
1018 if (child == NULL) 616 if (child == NULL)
1019 goto listen_overflow; 617 goto listen_overflow;
1020 618
1021 tcp_synq_unlink(tp, req, prev); 619 inet_csk_reqsk_queue_unlink(sk, req, prev);
1022 tcp_synq_removed(sk, req); 620 inet_csk_reqsk_queue_removed(sk, req);
1023 621
1024 tcp_acceptq_queue(sk, req, child); 622 inet_csk_reqsk_queue_add(sk, req, child);
1025 return child; 623 return child;
1026 624
1027 listen_overflow: 625 listen_overflow:
@@ -1035,7 +633,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
1035 if (!(flg & TCP_FLAG_RST)) 633 if (!(flg & TCP_FLAG_RST))
1036 req->rsk_ops->send_reset(skb); 634 req->rsk_ops->send_reset(skb);
1037 635
1038 tcp_synq_drop(sk, req, prev); 636 inet_csk_reqsk_queue_drop(sk, req, prev);
1039 return NULL; 637 return NULL;
1040} 638}
1041 639
@@ -1074,4 +672,3 @@ EXPORT_SYMBOL(tcp_check_req);
1074EXPORT_SYMBOL(tcp_child_process); 672EXPORT_SYMBOL(tcp_child_process);
1075EXPORT_SYMBOL(tcp_create_openreq_child); 673EXPORT_SYMBOL(tcp_create_openreq_child);
1076EXPORT_SYMBOL(tcp_timewait_state_process); 674EXPORT_SYMBOL(tcp_timewait_state_process);
1077EXPORT_SYMBOL(tcp_tw_deschedule);
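
The rationale comment removed along with tcp_tw_schedule() above explains why the timewait interval can be cut to 3.5*RTO when the peer supports PAWS; tcp_time_wait(), both before and after this patch, derives that factor with shifts as (rto << 2) - (rto >> 1). A minimal standalone sketch of that arithmetic (plain userspace C, sample RTO values chosen here purely for illustration, not taken from kernel headers):

/* Illustration of the 3.5*RTO shift arithmetic used by tcp_time_wait():
 *   (rto << 2) - (rto >> 1) == 4*rto - rto/2 with integer division,
 * i.e. 3.5*rto, rounded up when rto is odd.
 * The sample values below are arbitrary jiffy counts for demonstration only.
 */
#include <stdio.h>

static unsigned long rto_times_3_5(unsigned long rto)
{
	return (rto << 2) - (rto >> 1);
}

int main(void)
{
	unsigned long samples[] = { 200, 250, 3000 };

	for (int i = 0; i < 3; i++)
		printf("rto=%lu -> (rto<<2)-(rto>>1)=%lu, exact 3.5*rto=%.1f\n",
		       samples[i], rto_times_3_5(samples[i]), 3.5 * samples[i]);
	return 0;
}

The shift form keeps the calculation in integer jiffies, which matters because the result feeds a timer expiry; as the removed comment notes, the resulting timeout must stay above the timestamp tick so PAWS can still reject old duplicates.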