author     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>  2005-06-19 01:47:59 -0400
committer  David S. Miller <davem@davemloft.net>  2005-06-19 01:47:59 -0400
commit     0e87506fcc734647c7b2497eee4eb81e785c857a (patch)
tree       bb8863c59fdef2628f17b6773c52801792a57722 /net/ipv4/tcp.c
parent     60236fdd08b2169045a3bbfc5ffe1576e6c3c17b (diff)
[NET] Generalise tcp_listen_opt
This chunks out the accept_queue and tcp_listen_opt code and moves
them to net/core/request_sock.c and include/net/request_sock.h,
making this infrastructure available to other transport protocols;
DCCP will be the first one to use it.

Subsequent patches will rename tcp_listen_opt to accept_sock and
remove the inline tcp functions that just call a reqsk_queue_
function.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
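
For context before the diff: the call sites below exercise a handful of new
reqsk_queue_ helpers. The prototypes in this sketch are inferred purely from
how the helpers are used in this patch, not copied from
include/net/request_sock.h, so the exact signatures and the
struct request_sock_queue internals are assumptions:

/* Inferred API surface; parameter names are illustrative only. */
struct request_sock_queue;

int  reqsk_queue_alloc(struct request_sock_queue *queue, const int nr_table_entries);
void reqsk_queue_destroy(struct request_sock_queue *queue);
int  reqsk_queue_empty(struct request_sock_queue *queue);

/* Detach listen state from the socket, making it local to the caller. */
struct tcp_listen_opt *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue);
struct request_sock *reqsk_queue_yank_acceptq(struct request_sock_queue *queue);

/* Dequeue one completed request and hand back its child socket. */
struct sock *reqsk_queue_get_child(struct request_sock_queue *queue, struct sock *parent);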
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 67
1 file changed, 21 insertions(+), 46 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1c29feb6b35f..b85a46dd40a0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -316,7 +316,7 @@ EXPORT_SYMBOL(tcp_enter_memory_pressure);
 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
                                                poll_table *wait)
 {
-        return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
+        return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
 }
 
 /*
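
The poll hook is the simplest conversion: the old code tested the
accept_queue pointer against NULL, so reqsk_queue_empty() is presumably just
a head-pointer check. A minimal sketch, assuming the queue keeps its accept
list head in a field here called rskq_accept_head:

static inline int reqsk_queue_empty(struct request_sock_queue *queue)
{
        /* Empty iff no completed connection is waiting to be accepted. */
        return queue->rskq_accept_head == NULL;
}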
@@ -462,28 +462,15 @@ int tcp_listen_start(struct sock *sk)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_listen_opt *lopt;
+       int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
+
+       if (rc != 0)
+               return rc;
 
        sk->sk_max_ack_backlog = 0;
        sk->sk_ack_backlog = 0;
-       tp->accept_queue = tp->accept_queue_tail = NULL;
-       rwlock_init(&tp->syn_wait_lock);
        tcp_delack_init(tp);
 
-       lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
-       if (!lopt)
-               return -ENOMEM;
-
-       memset(lopt, 0, sizeof(struct tcp_listen_opt));
-       for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
-               if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
-                       break;
-       get_random_bytes(&lopt->hash_rnd, 4);
-
-       write_lock_bh(&tp->syn_wait_lock);
-       tp->listen_opt = lopt;
-       write_unlock_bh(&tp->syn_wait_lock);
-
        /* There is race window here: we announce ourselves listening,
         * but this transition is still not validated by get_port().
         * It is OK, because this socket enters to hash table only
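
Everything deleted above — allocating and zeroing struct tcp_listen_opt,
rounding sysctl_max_syn_backlog up to a power of two for max_qlen_log,
seeding hash_rnd, initialising the queue pointers and syn_wait_lock, and
publishing lopt under that lock — is exactly the work reqsk_queue_alloc()
must now do. A hedged reconstruction from the removed lines, with assumed
field names on struct request_sock_queue (the real helper presumably also
uses nr_table_entries to size the SYN hash table, which this sketch elides):

int reqsk_queue_alloc(struct request_sock_queue *queue,
                      const int nr_table_entries)
{
        struct tcp_listen_opt *lopt = kmalloc(sizeof(*lopt), GFP_KERNEL);

        if (lopt == NULL)
                return -ENOMEM;

        memset(lopt, 0, sizeof(*lopt));
        /* Smallest power of two that covers the configured SYN backlog. */
        for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
                if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
                        break;
        get_random_bytes(&lopt->hash_rnd, 4);

        queue->rskq_accept_head = queue->rskq_accept_tail = NULL;
        rwlock_init(&queue->syn_wait_lock);

        /* Publish under the writer lock, as the removed code did. */
        write_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
        write_unlock_bh(&queue->syn_wait_lock);
        return 0;
}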
@@ -500,10 +487,7 @@ int tcp_listen_start(struct sock *sk)
        }
 
        sk->sk_state = TCP_CLOSE;
-       write_lock_bh(&tp->syn_wait_lock);
-       tp->listen_opt = NULL;
-       write_unlock_bh(&tp->syn_wait_lock);
-       kfree(lopt);
+       reqsk_queue_destroy(&tp->accept_queue);
        return -EADDRINUSE;
 }
 
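
The error path mirrors the setup: reqsk_queue_destroy() replaces the
unpublish-and-kfree sequence that was open-coded here. A minimal sketch
under the same assumptions:

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
        struct tcp_listen_opt *lopt;

        /* Unpublish the listen options under the writer lock, then free. */
        write_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
        write_unlock_bh(&queue->syn_wait_lock);
        kfree(lopt);
}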
@@ -515,18 +499,16 @@ int tcp_listen_start(struct sock *sk)
 static void tcp_listen_stop (struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_listen_opt *lopt = tp->listen_opt;
-       struct request_sock *acc_req = tp->accept_queue;
+       struct tcp_listen_opt *lopt;
+       struct request_sock *acc_req;
        struct request_sock *req;
        int i;
 
        tcp_delete_keepalive_timer(sk);
 
        /* make all the listen_opt local to us */
-       write_lock_bh(&tp->syn_wait_lock);
-       tp->listen_opt = NULL;
-       write_unlock_bh(&tp->syn_wait_lock);
-       tp->accept_queue = tp->accept_queue_tail = NULL;
+       lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue);
+       acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
 
        if (lopt->qlen) {
                for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
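
The two yank helpers replace the hand-rolled lock/clear/unlock dance,
detaching the SYN table and the accept queue from the socket so that
tcp_listen_stop() can drain them without further locking. Plausible inline
definitions, again with assumed field names:

static inline struct tcp_listen_opt *
reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
{
        struct tcp_listen_opt *lopt;

        write_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
        write_unlock_bh(&queue->syn_wait_lock);
        return lopt;
}

static inline struct request_sock *
reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
        struct request_sock *req = queue->rskq_accept_head;

        queue->rskq_accept_head = queue->rskq_accept_tail = NULL;
        return req;
}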
@@ -1867,11 +1849,11 @@ static int wait_for_connect(struct sock *sk, long timeo)
                prepare_to_wait_exclusive(sk->sk_sleep, &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
-               if (!tp->accept_queue)
+               if (reqsk_queue_empty(&tp->accept_queue))
                        timeo = schedule_timeout(timeo);
                lock_sock(sk);
                err = 0;
-               if (tp->accept_queue)
+               if (!reqsk_queue_empty(&tp->accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
@@ -1894,7 +1876,6 @@ static int wait_for_connect(struct sock *sk, long timeo)
 struct sock *tcp_accept(struct sock *sk, int flags, int *err)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct request_sock *req;
        struct sock *newsk;
        int error;
 
@@ -1905,37 +1886,31 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
-               goto out;
+               goto out_err;
 
        /* Find already established connection */
-       if (!tp->accept_queue) {
+       if (reqsk_queue_empty(&tp->accept_queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
                /* If this is a non blocking socket don't sleep */
                error = -EAGAIN;
                if (!timeo)
-                       goto out;
+                       goto out_err;
 
                error = wait_for_connect(sk, timeo);
                if (error)
-                       goto out;
+                       goto out_err;
        }
 
-       req = tp->accept_queue;
-       if ((tp->accept_queue = req->dl_next) == NULL)
-               tp->accept_queue_tail = NULL;
-
-       newsk = req->sk;
-       sk_acceptq_removed(sk);
-       __reqsk_free(req);
+       newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
        BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
-       release_sock(sk);
-       return newsk;
-
 out:
        release_sock(sk);
+       return newsk;
+out_err:
+       newsk = NULL;
        *err = error;
-       return NULL;
+       goto out;
 }
 
 /*
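
The seven removed lines in tcp_accept() — pop the head request, fix up the
tail pointer, take the child socket, decrement the parent's accept backlog,
and free the request — collapse into the single reqsk_queue_get_child()
call. A sketch reconstructed from exactly those removed lines, with the same
assumed queue field names as above:

static inline struct sock *
reqsk_queue_get_child(struct request_sock_queue *queue, struct sock *parent)
{
        struct request_sock *req = queue->rskq_accept_head;
        struct sock *child = req->sk;

        /* Pop the queue head; clear the tail when the list drains. */
        if ((queue->rskq_accept_head = req->dl_next) == NULL)
                queue->rskq_accept_tail = NULL;

        sk_acceptq_removed(parent);
        __reqsk_free(req);
        return child;
}

Note also the restructured exits in this hunk: success and failure now share
the single release_sock() at the out label, with out_err clearing newsk
first, so the socket lock is dropped on exactly one path.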