author	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 23:15:09 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:49:50 -0400
commit	a019d6fe2b9da68ea4ba6cf3c4e86fc1dbf554c3 (patch)
tree	f82f0523c313228d64998fac30790edcfd0785c3	/net/ipv4/tcp.c
parent	7c657876b63cb1d8a2ec06f8fc6c37bb8412e66c (diff)
[ICSK]: Move generalised functions from tcp to inet_connection_sock
This also improves reqsk_queue_prune and renames it to
inet_csk_reqsk_queue_prune, as it deals with both inet_connection_sock
and inet_request_sock objects, not just with request_sock ones, thus
belonging to inet_request_sock.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
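For context, the helpers moved out of tcp.c keep the exact signatures
shown in the removed hunks below, so any connection-oriented protocol
built on inet_connection_sock (not just TCP) can call them directly.
A minimal sketch of such a caller follows; my_proto_listen,
my_proto_close and MY_PROTO_SYNQ_HSIZE are illustrative names, not
part of this patch:

	#include <net/inet_connection_sock.h>

	#define MY_PROTO_SYNQ_HSIZE 512	/* illustrative request queue size */

	static int my_proto_listen(struct sock *sk, int backlog)
	{
		/* Allocate the request_sock accept queue and enter
		 * TCP_LISTEN; returns -EADDRINUSE if get_port() fails,
		 * exactly as in the code removed from tcp.c below.
		 */
		int err = inet_csk_listen_start(sk, MY_PROTO_SYNQ_HSIZE);

		if (err != 0)
			return err;

		sk->sk_max_ack_backlog = backlog;
		return 0;
	}

	static void my_proto_close(struct sock *sk)
	{
		/* Abort embryonic and not-yet-accept()ed connections
		 * before the listening socket itself goes away.
		 */
		if (sk->sk_state == TCP_LISTEN)
			inet_csk_listen_stop(sk);
	}

This is roughly the pattern inet_listen() follows for TCP once this
series lands.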
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	120
1 file changed, 0 insertions(+), 120 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a4e9eec44895..4bda522d25cf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -456,96 +456,6 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		return put_user(answ, (int __user *)arg);
 }
 
-int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
-
-	if (rc != 0)
-		return rc;
-
-	sk->sk_max_ack_backlog = 0;
-	sk->sk_ack_backlog = 0;
-	inet_csk_delack_init(sk);
-
-	/* There is race window here: we announce ourselves listening,
-	 * but this transition is still not validated by get_port().
-	 * It is OK, because this socket enters to hash table only
-	 * after validation is complete.
-	 */
-	sk->sk_state = TCP_LISTEN;
-	if (!sk->sk_prot->get_port(sk, inet->num)) {
-		inet->sport = htons(inet->num);
-
-		sk_dst_reset(sk);
-		sk->sk_prot->hash(sk);
-
-		return 0;
-	}
-
-	sk->sk_state = TCP_CLOSE;
-	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
-	return -EADDRINUSE;
-}
-
-EXPORT_SYMBOL_GPL(inet_csk_listen_start);
-
-/*
- * This routine closes sockets which have been at least partially
- * opened, but not yet accepted.
- */
-void inet_csk_listen_stop(struct sock *sk)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct request_sock *acc_req;
-	struct request_sock *req;
-
-	inet_csk_delete_keepalive_timer(sk);
-
-	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
-
-	/* Following specs, it would be better either to send FIN
-	 * (and enter FIN-WAIT-1, it is normal close)
-	 * or to send active reset (abort).
-	 * Certainly, it is pretty dangerous while synflood, but it is
-	 * bad justification for our negligence 8)
-	 * To be honest, we are not able to make either
-	 * of the variants now. --ANK
-	 */
-	reqsk_queue_destroy(&icsk->icsk_accept_queue);
-
-	while ((req = acc_req) != NULL) {
-		struct sock *child = req->sk;
-
-		acc_req = req->dl_next;
-
-		local_bh_disable();
-		bh_lock_sock(child);
-		BUG_TRAP(!sock_owned_by_user(child));
-		sock_hold(child);
-
-		sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-		sock_orphan(child);
-
-		atomic_inc(sk->sk_prot->orphan_count);
-
-		inet_csk_destroy_sock(child);
-
-		bh_unlock_sock(child);
-		local_bh_enable();
-		sock_put(child);
-
-		sk_acceptq_removed(sk);
-		__reqsk_free(req);
-	}
-	BUG_TRAP(!sk->sk_ack_backlog);
-}
-
-EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
-
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
@@ -1559,35 +1469,6 @@ void tcp_shutdown(struct sock *sk, int how)
 	}
 }
 
-/*
- * At this point, there should be no process reference to this
- * socket, and thus no user references at all.  Therefore we
- * can assume the socket waitqueue is inactive and nobody will
- * try to jump onto it.
- */
-void inet_csk_destroy_sock(struct sock *sk)
-{
-	BUG_TRAP(sk->sk_state == TCP_CLOSE);
-	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
-
-	/* It cannot be in hash table! */
-	BUG_TRAP(sk_unhashed(sk));
-
-	/* If it has not 0 inet_sk(sk)->num, it must be bound */
-	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
-
-	sk->sk_prot->destroy(sk);
-
-	sk_stream_kill_queues(sk);
-
-	xfrm_sk_free_policy(sk);
-
-	sk_refcnt_debug_release(sk);
-
-	atomic_dec(sk->sk_prot->orphan_count);
-	sock_put(sk);
-}
-
 void tcp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
@@ -2258,7 +2139,6 @@ void __init tcp_init(void)
 }
 
 EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(inet_csk_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
 EXPORT_SYMBOL(tcp_getsockopt);
 EXPORT_SYMBOL(tcp_ioctl);