| author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 23:11:08 -0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:49:14 -0400 |
| commit | 3f421baa4720b708022f8bcc52a61e5cd6f10bf8 (patch) | |
| tree | e4201b1e2356cea8b7bd8d68dfba06e84002a77d /net/ipv4/tcp.c | |
| parent | 463c84b97f24010a67cd871746d6a7e4c925a5f9 (diff) | |
[NET]: Just move the inet_connection_sock function from tcp sources
Completing the previous changeset, this also generalises tcp_v4_synq_add,
renaming it to inet_csk_reqsk_queue_hash_add, already being used in the
DCCP tree, which I plan to merge RSN.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
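For illustration only, a sketch of the call-site change the rename implies. The call site itself lives in net/ipv4/tcp_ipv4.c and is outside the hunks below (which are limited to net/ipv4/tcp.c); the caller name tcp_v4_conn_request() and the TCP_TIMEOUT_INIT argument are assumptions based on the surrounding 2.6 networking code, where the generalised helper takes the timeout as an explicit parameter:

```c
/* Sketch of the rename at the listener's SYN-queue insertion; not part
 * of this patch.  Assumes tcp_v4_conn_request() as the caller and
 * TCP_TIMEOUT_INIT as the timeout TCP passes explicitly.
 */

/* before: TCP-private helper in net/ipv4/tcp_ipv4.c */
tcp_v4_synq_add(sk, req);

/* after: generic inet_connection_sock helper, reusable by DCCP */
inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
```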
Diffstat (limited to 'net/ipv4/tcp.c')
| -rw-r--r-- | net/ipv4/tcp.c | 93 |
1 file changed, 0 insertions(+), 93 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8177b86570db..581016a6a93f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1805,98 +1805,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 
 /*
- * Wait for an incoming connection, avoid race
- * conditions. This must be called with the socket locked.
- */
-static int wait_for_connect(struct sock *sk, long timeo)
-{
-        struct inet_connection_sock *icsk = inet_csk(sk);
-        DEFINE_WAIT(wait);
-        int err;
-
-        /*
-         * True wake-one mechanism for incoming connections: only
-         * one process gets woken up, not the 'whole herd'.
-         * Since we do not 'race & poll' for established sockets
-         * anymore, the common case will execute the loop only once.
-         *
-         * Subtle issue: "add_wait_queue_exclusive()" will be added
-         * after any current non-exclusive waiters, and we know that
-         * it will always _stay_ after any new non-exclusive waiters
-         * because all non-exclusive waiters are added at the
-         * beginning of the wait-queue. As such, it's ok to "drop"
-         * our exclusiveness temporarily when we get woken up without
-         * having to remove and re-insert us on the wait queue.
-         */
-        for (;;) {
-                prepare_to_wait_exclusive(sk->sk_sleep, &wait,
-                                          TASK_INTERRUPTIBLE);
-                release_sock(sk);
-                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
-                        timeo = schedule_timeout(timeo);
-                lock_sock(sk);
-                err = 0;
-                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
-                        break;
-                err = -EINVAL;
-                if (sk->sk_state != TCP_LISTEN)
-                        break;
-                err = sock_intr_errno(timeo);
-                if (signal_pending(current))
-                        break;
-                err = -EAGAIN;
-                if (!timeo)
-                        break;
-        }
-        finish_wait(sk->sk_sleep, &wait);
-        return err;
-}
-
-/*
- * This will accept the next outstanding connection.
- */
-
-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
-{
-        struct inet_connection_sock *icsk = inet_csk(sk);
-        struct sock *newsk;
-        int error;
-
-        lock_sock(sk);
-
-        /* We need to make sure that this socket is listening,
-         * and that it has something pending.
-         */
-        error = -EINVAL;
-        if (sk->sk_state != TCP_LISTEN)
-                goto out_err;
-
-        /* Find already established connection */
-        if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
-                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
-                /* If this is a non blocking socket don't sleep */
-                error = -EAGAIN;
-                if (!timeo)
-                        goto out_err;
-
-                error = wait_for_connect(sk, timeo);
-                if (error)
-                        goto out_err;
-        }
-
-        newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-        BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
-out:
-        release_sock(sk);
-        return newsk;
-out_err:
-        newsk = NULL;
-        *err = error;
-        goto out;
-}
-
-/*
  * Socket option code for TCP.
  */
 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -2344,7 +2252,6 @@ void __init tcp_init(void)
         tcp_register_congestion_control(&tcp_reno);
 }
 
-EXPORT_SYMBOL(inet_csk_accept);
 EXPORT_SYMBOL(tcp_close);
 EXPORT_SYMBOL(tcp_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
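The two functions deleted above are not lost: in this series they move into the generic connection-socket code (net/ipv4/inet_connection_sock.c), so any protocol built on struct inet_connection_sock can hang them off its struct proto. A minimal sketch of that reuse, with the field list abbreviated and assumed rather than quoted from the tree:

```c
/* Sketch: wiring the now-generic accept path into a protocol's
 * struct proto.  Only the relevant members are shown; the real
 * tcp_prot in net/ipv4/tcp_ipv4.c carries many more callbacks.
 */
struct proto tcp_prot = {
        .name   = "TCP",
        .close  = tcp_close,
        .accept = inet_csk_accept,      /* shared with DCCP, no longer TCP-private */
        /* ... */
};
```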
