diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-06-19 01:47:59 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2005-06-19 01:47:59 -0400 |
commit | 0e87506fcc734647c7b2497eee4eb81e785c857a (patch) | |
tree | bb8863c59fdef2628f17b6773c52801792a57722 /net/ipv4/tcp_ipv4.c | |
parent | 60236fdd08b2169045a3bbfc5ffe1576e6c3c17b (diff) |
[NET] Generalise tcp_listen_opt
This chunks out the accept_queue and tcp_listen_opt code and moves
them to net/core/request_sock.c and include/net/request_sock.h, to
make it useful for other transport protocols, DCCP being the first one
to use it.
Next patches will rename tcp_listen_opt to accept_sock and remove the
inline tcp functions that just call a reqsk_queue_ function.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 32 |
1 file changed, 12 insertions, 20 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 95528a75a63d..1745dc8d25e6 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -874,7 +874,7 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, | |||
874 | __u16 rport, | 874 | __u16 rport, |
875 | __u32 raddr, __u32 laddr) | 875 | __u32 raddr, __u32 laddr) |
876 | { | 876 | { |
877 | struct tcp_listen_opt *lopt = tp->listen_opt; | 877 | struct tcp_listen_opt *lopt = tp->accept_queue.listen_opt; |
878 | struct request_sock *req, **prev; | 878 | struct request_sock *req, **prev; |
879 | 879 | ||
880 | for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; | 880 | for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; |
@@ -898,18 +898,10 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, | |||
898 | static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) | 898 | static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) |
899 | { | 899 | { |
900 | struct tcp_sock *tp = tcp_sk(sk); | 900 | struct tcp_sock *tp = tcp_sk(sk); |
901 | struct tcp_listen_opt *lopt = tp->listen_opt; | 901 | struct tcp_listen_opt *lopt = tp->accept_queue.listen_opt; |
902 | u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); | 902 | u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); |
903 | 903 | ||
904 | req->expires = jiffies + TCP_TIMEOUT_INIT; | 904 | reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); |
905 | req->retrans = 0; | ||
906 | req->sk = NULL; | ||
907 | req->dl_next = lopt->syn_table[h]; | ||
908 | |||
909 | write_lock(&tp->syn_wait_lock); | ||
910 | lopt->syn_table[h] = req; | ||
911 | write_unlock(&tp->syn_wait_lock); | ||
912 | |||
913 | tcp_synq_added(sk); | 905 | tcp_synq_added(sk); |
914 | } | 906 | } |
915 | 907 | ||
@@ -2167,17 +2159,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
2167 | if (++st->sbucket >= TCP_SYNQ_HSIZE) | 2159 | if (++st->sbucket >= TCP_SYNQ_HSIZE) |
2168 | break; | 2160 | break; |
2169 | get_req: | 2161 | get_req: |
2170 | req = tp->listen_opt->syn_table[st->sbucket]; | 2162 | req = tp->accept_queue.listen_opt->syn_table[st->sbucket]; |
2171 | } | 2163 | } |
2172 | sk = sk_next(st->syn_wait_sk); | 2164 | sk = sk_next(st->syn_wait_sk); |
2173 | st->state = TCP_SEQ_STATE_LISTENING; | 2165 | st->state = TCP_SEQ_STATE_LISTENING; |
2174 | read_unlock_bh(&tp->syn_wait_lock); | 2166 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); |
2175 | } else { | 2167 | } else { |
2176 | tp = tcp_sk(sk); | 2168 | tp = tcp_sk(sk); |
2177 | read_lock_bh(&tp->syn_wait_lock); | 2169 | read_lock_bh(&tp->accept_queue.syn_wait_lock); |
2178 | if (tp->listen_opt && tp->listen_opt->qlen) | 2170 | if (reqsk_queue_len(&tp->accept_queue)) |
2179 | goto start_req; | 2171 | goto start_req; |
2180 | read_unlock_bh(&tp->syn_wait_lock); | 2172 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); |
2181 | sk = sk_next(sk); | 2173 | sk = sk_next(sk); |
2182 | } | 2174 | } |
2183 | get_sk: | 2175 | get_sk: |
@@ -2187,8 +2179,8 @@ get_sk: | |||
2187 | goto out; | 2179 | goto out; |
2188 | } | 2180 | } |
2189 | tp = tcp_sk(sk); | 2181 | tp = tcp_sk(sk); |
2190 | read_lock_bh(&tp->syn_wait_lock); | 2182 | read_lock_bh(&tp->accept_queue.syn_wait_lock); |
2191 | if (tp->listen_opt && tp->listen_opt->qlen) { | 2183 | if (reqsk_queue_len(&tp->accept_queue)) { |
2192 | start_req: | 2184 | start_req: |
2193 | st->uid = sock_i_uid(sk); | 2185 | st->uid = sock_i_uid(sk); |
2194 | st->syn_wait_sk = sk; | 2186 | st->syn_wait_sk = sk; |
@@ -2196,7 +2188,7 @@ start_req: | |||
2196 | st->sbucket = 0; | 2188 | st->sbucket = 0; |
2197 | goto get_req; | 2189 | goto get_req; |
2198 | } | 2190 | } |
2199 | read_unlock_bh(&tp->syn_wait_lock); | 2191 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); |
2200 | } | 2192 | } |
2201 | if (++st->bucket < TCP_LHTABLE_SIZE) { | 2193 | if (++st->bucket < TCP_LHTABLE_SIZE) { |
2202 | sk = sk_head(&tcp_listening_hash[st->bucket]); | 2194 | sk = sk_head(&tcp_listening_hash[st->bucket]); |
@@ -2383,7 +2375,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) | |||
2383 | case TCP_SEQ_STATE_OPENREQ: | 2375 | case TCP_SEQ_STATE_OPENREQ: |
2384 | if (v) { | 2376 | if (v) { |
2385 | struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); | 2377 | struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); |
2386 | read_unlock_bh(&tp->syn_wait_lock); | 2378 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); |
2387 | } | 2379 | } |
2388 | case TCP_SEQ_STATE_LISTENING: | 2380 | case TCP_SEQ_STATE_LISTENING: |
2389 | if (v != SEQ_START_TOKEN) | 2381 | if (v != SEQ_START_TOKEN) |