about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>    2005-08-09 22:33:31 -0400
committer David S. Miller <davem@sunset.davemloft.net>          2005-08-29 18:32:11 -0400
commit    83e3609eba3818f6e18b8bf9442195169ac306b7 (patch)
tree      b0dd71b7e5ea6e8b17813cf6b3736a1b0f443ab7 /net
parent    080774a243f56ce2195ace96fba3d18548ee48ce (diff)
[REQSK]: Move the syn_table destroy from tcp_listen_stop to reqsk_queue_destroy
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/core/request_sock.c  26
-rw-r--r--  net/ipv4/tcp.c           35
2 files changed, 36 insertions(+), 25 deletions(-)
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index bb55675f0685..4e99ce5c08f2 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -53,6 +53,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
53 get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); 53 get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
54 rwlock_init(&queue->syn_wait_lock); 54 rwlock_init(&queue->syn_wait_lock);
55 queue->rskq_accept_head = queue->rskq_accept_head = NULL; 55 queue->rskq_accept_head = queue->rskq_accept_head = NULL;
56 lopt->nr_table_entries = nr_table_entries;
56 57
57 write_lock_bh(&queue->syn_wait_lock); 58 write_lock_bh(&queue->syn_wait_lock);
58 queue->listen_opt = lopt; 59 queue->listen_opt = lopt;
@@ -62,3 +63,28 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
62} 63}
63 64
64EXPORT_SYMBOL(reqsk_queue_alloc); 65EXPORT_SYMBOL(reqsk_queue_alloc);
66
67void reqsk_queue_destroy(struct request_sock_queue *queue)
68{
69 /* make all the listen_opt local to us */
70 struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
71
72 if (lopt->qlen != 0) {
73 int i;
74
75 for (i = 0; i < lopt->nr_table_entries; i++) {
76 struct request_sock *req;
77
78 while ((req = lopt->syn_table[i]) != NULL) {
79 lopt->syn_table[i] = req->dl_next;
80 lopt->qlen--;
81 reqsk_free(req);
82 }
83 }
84 }
85
86 BUG_TRAP(lopt->qlen == 0);
87 kfree(lopt);
88}
89
90EXPORT_SYMBOL(reqsk_queue_destroy);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d2696af46c70..42a2e2ccd430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -487,7 +487,7 @@ int tcp_listen_start(struct sock *sk)
487 } 487 }
488 488
489 sk->sk_state = TCP_CLOSE; 489 sk->sk_state = TCP_CLOSE;
490 reqsk_queue_destroy(&tp->accept_queue); 490 __reqsk_queue_destroy(&tp->accept_queue);
491 return -EADDRINUSE; 491 return -EADDRINUSE;
492} 492}
493 493
@@ -499,38 +499,23 @@ int tcp_listen_start(struct sock *sk)
499static void tcp_listen_stop (struct sock *sk) 499static void tcp_listen_stop (struct sock *sk)
500{ 500{
501 struct tcp_sock *tp = tcp_sk(sk); 501 struct tcp_sock *tp = tcp_sk(sk);
502 struct listen_sock *lopt;
503 struct request_sock *acc_req; 502 struct request_sock *acc_req;
504 struct request_sock *req; 503 struct request_sock *req;
505 int i;
506 504
507 tcp_delete_keepalive_timer(sk); 505 tcp_delete_keepalive_timer(sk);
508 506
509 /* make all the listen_opt local to us */ 507 /* make all the listen_opt local to us */
510 lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue);
511 acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue); 508 acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
512 509
513 if (lopt->qlen) { 510 /* Following specs, it would be better either to send FIN
514 for (i = 0; i < TCP_SYNQ_HSIZE; i++) { 511 * (and enter FIN-WAIT-1, it is normal close)
515 while ((req = lopt->syn_table[i]) != NULL) { 512 * or to send active reset (abort).
516 lopt->syn_table[i] = req->dl_next; 513 * Certainly, it is pretty dangerous while synflood, but it is
517 lopt->qlen--; 514 * bad justification for our negligence 8)
518 reqsk_free(req); 515 * To be honest, we are not able to make either
519 516 * of the variants now. --ANK
520 /* Following specs, it would be better either to send FIN 517 */
521 * (and enter FIN-WAIT-1, it is normal close) 518 reqsk_queue_destroy(&tp->accept_queue);
522 * or to send active reset (abort).
523 * Certainly, it is pretty dangerous while synflood, but it is
524 * bad justification for our negligence 8)
525 * To be honest, we are not able to make either
526 * of the variants now. --ANK
527 */
528 }
529 }
530 }
531 BUG_TRAP(!lopt->qlen);
532
533 kfree(lopt);
534 519
535 while ((req = acc_req) != NULL) { 520 while ((req = acc_req) != NULL) {
536 struct sock *child = req->sk; 521 struct sock *child = req->sk;