author	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 23:01:14 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:38:48 -0400
commit	a55ebcc4c4532107ad9eee1c9bb698ab5f12c00f
tree	964be5918610e6f89cc7f2fee48418979b48773b /net/ipv4/tcp_ipv4.c
parent	77d8bf9c6208eb535f05718168ffcc476be0ca8c
[INET]: Move bind_hash from tcp_sk to inet_sk
This should really be in an inet_connection_sock, but I'm leaving it for a later optimization, when some more fields common to INET transport protocols, now in tcp_sk or inet_sk, will be chunked out into inet_connection_sock. For now it's better to concentrate on getting the changes in the core merged, to leave the DCCP tree with only DCCP-specific code.

Next changesets will take advantage of this move to generalise things like tcp_bind_hash, tcp_put_port and tcp_inherit_port, making the latter receive an inet_hashinfo parameter, and even __tcp_tw_hashdance, etc. in the future, when tcp_tw_bucket gets transformed into the struct timewait_sock hierarchy.

tcp_destroy_sock is also eligible as soon as tcp_orphan_count gets moved to sk_prot.

A cascade of incremental changes will ultimately make the tcp_lookup functions fully generic.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
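For illustration, a minimal sketch of the relocation this patch performs, assuming only what the hunks below show (both structs are abridged; the real kernel definitions carry many more fields):

	/* Before: the bind bucket pointer was TCP-private. */
	struct tcp_sock {
		/* ... */
		struct inet_bind_bucket	*bind_hash;
	};

	/*
	 * After: it lives in the protocol-generic inet_sock, so other
	 * INET transports (DCCP is the stated target) can reuse the
	 * bind hash machinery. Accesses change from
	 * tcp_sk(sk)->bind_hash to inet_sk(sk)->bind_hash.
	 */
	struct inet_sock {
		/* ... */
		unsigned short		 num;		/* local port, host byte order */
		struct inet_bind_bucket	*bind_hash;	/* moved here from tcp_sock */
	};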
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 58e36ed88f25..10a9b3ae3442 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -113,9 +113,9 @@ static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet_sk(sk)->bind_hash;
 	sk_add_bind_node(child, &tb->owners);
-	tcp_sk(child)->bind_hash = tb;
+	inet_sk(child)->bind_hash = tb;
 	spin_unlock(&head->lock);
 }
 
@@ -129,9 +129,10 @@ inline void tcp_inherit_port(struct sock *sk, struct sock *child)
 void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 		   const unsigned short snum)
 {
-	inet_sk(sk)->num = snum;
+	struct inet_sock *inet = inet_sk(sk);
+	inet->num = snum;
 	sk_add_bind_node(sk, &tb->owners);
-	tcp_sk(sk)->bind_hash = tb;
+	inet->bind_hash = tb;
 }
 
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
@@ -246,9 +247,9 @@ tb_not_found:
 	    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
 		tb->fastreuse = 0;
 success:
-	if (!tcp_sk(sk)->bind_hash)
+	if (!inet_sk(sk)->bind_hash)
 		tcp_bind_hash(sk, tb, snum);
-	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+	BUG_TRAP(inet_sk(sk)->bind_hash == tb);
 	ret = 0;
 
 fail_unlock:
@@ -269,9 +270,9 @@ static void __tcp_put_port(struct sock *sk)
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet->bind_hash;
 	__sk_del_bind_node(sk);
-	tcp_sk(sk)->bind_hash = NULL;
+	inet->bind_hash = NULL;
 	inet->num = 0;
 	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
 	spin_unlock(&head->lock);
@@ -694,7 +695,7 @@ ok:
 	}
 
 	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
 		__tcp_v4_hash(sk, 0);
@@ -1940,7 +1941,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
 	__skb_queue_purge(&tp->ucopy.prequeue);
 
 	/* Clean up a referenced TCP bind bucket. */
-	if (tp->bind_hash)
+	if (inet_sk(sk)->bind_hash)
 		tcp_put_port(sk);
 
 	/*