-rw-r--r--  include/linux/ip.h        |  2
-rw-r--r--  include/linux/tcp.h       |  1
-rw-r--r--  include/net/tcp.h         |  2
-rw-r--r--  net/ipv4/tcp.c            |  4
-rw-r--r--  net/ipv4/tcp_ipv4.c       | 21
-rw-r--r--  net/ipv4/tcp_minisocks.c  | 15
-rw-r--r--  net/ipv6/tcp_ipv6.c       |  8
7 files changed, 28 insertions(+), 25 deletions(-)
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 33e8a19a1a0f..2c54bbd3da76 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -128,6 +128,7 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
 	return (struct inet_request_sock *)sk;
 }
 
+struct inet_bind_bucket;
 struct ipv6_pinfo;
 
 struct inet_sock {
@@ -157,6 +158,7 @@ struct inet_sock {
 	int			mc_index;	/* Multicast device index */
 	__u32			mc_addr;
 	struct ip_mc_socklist	*mc_list;	/* Group array */
+	struct inet_bind_bucket	*bind_hash;
 	/*
 	 * Following members are used to retain the infomation to build
 	 * an ip header on each ip fragmentation while the socket is corked.
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ec580a560e8c..e70ab19652db 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -258,7 +258,6 @@ struct tcp_sock {
 	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
 	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
 	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
-	struct inet_bind_bucket *bind_hash;
 	/* Delayed ACK control data */
 	struct {
 		__u8	pending;	/* ACK is pending */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ff5d30ac2b06..6c6c879e7e87 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1266,7 +1266,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
-		if (tcp_sk(sk)->bind_hash &&
+		if (inet_sk(sk)->bind_hash &&
 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
 			tcp_put_port(sk);
 		/* fall through */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1ec03db7dcd9..e54a410ca701 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1575,7 +1575,7 @@ void tcp_destroy_sock(struct sock *sk)
 	BUG_TRAP(sk_unhashed(sk));
 
 	/* If it has not 0 inet_sk(sk)->num, it must be bound */
-	BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
+	BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash);
 
 	sk->sk_prot->destroy(sk);
 
@@ -1802,7 +1802,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_sack_reset(&tp->rx_opt);
 	__sk_dst_reset(sk);
 
-	BUG_TRAP(!inet->num || tp->bind_hash);
+	BUG_TRAP(!inet->num || inet->bind_hash);
 
 	sk->sk_error_report(sk);
 	return err;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 58e36ed88f25..10a9b3ae3442 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -113,9 +113,9 @@ static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet_sk(sk)->bind_hash;
 	sk_add_bind_node(child, &tb->owners);
-	tcp_sk(child)->bind_hash = tb;
+	inet_sk(child)->bind_hash = tb;
 	spin_unlock(&head->lock);
 }
 
@@ -129,9 +129,10 @@ inline void tcp_inherit_port(struct sock *sk, struct sock *child)
 void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 		   const unsigned short snum)
 {
-	inet_sk(sk)->num = snum;
+	struct inet_sock *inet = inet_sk(sk);
+	inet->num = snum;
 	sk_add_bind_node(sk, &tb->owners);
-	tcp_sk(sk)->bind_hash = tb;
+	inet->bind_hash = tb;
 }
 
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
@@ -246,9 +247,9 @@ tb_not_found:
 	    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
 		tb->fastreuse = 0;
 success:
-	if (!tcp_sk(sk)->bind_hash)
+	if (!inet_sk(sk)->bind_hash)
 		tcp_bind_hash(sk, tb, snum);
-	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+	BUG_TRAP(inet_sk(sk)->bind_hash == tb);
 	ret = 0;
 
 fail_unlock:
@@ -269,9 +270,9 @@ static void __tcp_put_port(struct sock *sk)
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet->bind_hash;
 	__sk_del_bind_node(sk);
-	tcp_sk(sk)->bind_hash = NULL;
+	inet->bind_hash = NULL;
 	inet->num = 0;
 	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
 	spin_unlock(&head->lock);
@@ -694,7 +695,7 @@ ok:
 	}
 
 	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
 		__tcp_v4_hash(sk, 0);
@@ -1940,7 +1941,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
 	__skb_queue_purge(&tp->ucopy.prequeue);
 
 	/* Clean up a referenced TCP bind bucket. */
-	if (tp->bind_hash)
+	if (inet_sk(sk)->bind_hash)
 		tcp_put_port(sk);
 
 	/*
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 1df6cd46066b..267cea1087e5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -296,17 +296,17 @@ kill:
  */
 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 {
+	const struct inet_sock *inet = inet_sk(sk);
 	struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
 	struct inet_bind_hashbucket *bhead;
-
 	/* Step 1: Put TW into bind hash. Original socket stays there too.
-	   Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
+	   Note, that any socket with inet->num != 0 MUST be bound in
 	   binding cache, even if it is closed.
 	 */
-	bhead = &tcp_bhash[inet_bhashfn(inet_sk(sk)->num, tcp_bhash_size)];
+	bhead = &tcp_bhash[inet_bhashfn(inet->num, tcp_bhash_size)];
 	spin_lock(&bhead->lock);
-	tw->tw_tb = tcp_sk(sk)->bind_hash;
-	BUG_TRAP(tcp_sk(sk)->bind_hash);
+	tw->tw_tb = inet->bind_hash;
+	BUG_TRAP(inet->bind_hash);
 	tw_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
 
@@ -694,6 +694,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	if(newsk != NULL) {
 		struct inet_request_sock *ireq = inet_rsk(req);
 		struct tcp_request_sock *treq = tcp_rsk(req);
+		struct inet_sock *newinet = inet_sk(newsk);
 		struct tcp_sock *newtp;
 		struct sk_filter *filter;
 
@@ -702,10 +703,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 
 		/* SANITY */
 		sk_node_init(&newsk->sk_node);
-		tcp_sk(newsk)->bind_hash = NULL;
+		newinet->bind_hash = NULL;
 
 		/* Clone the TCP header template */
-		inet_sk(newsk)->dport = ireq->rmt_port;
+		newinet->dport = ireq->rmt_port;
 
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 31f50fb29ffb..a8ca7ba06c1c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -204,9 +204,9 @@ tb_not_found:
 		tb->fastreuse = 0;
 
 success:
-	if (!tcp_sk(sk)->bind_hash)
+	if (!inet_sk(sk)->bind_hash)
 		tcp_bind_hash(sk, tb, snum);
-	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+	BUG_TRAP(inet_sk(sk)->bind_hash == tb);
 	ret = 0;
 
 fail_unlock:
@@ -613,8 +613,8 @@ ok:
 		goto out;
 	}
 
 	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
-	tb = tcp_sk(sk)->bind_hash;
+	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {