aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorEric Dumazet <dada1@cosmosbay.com>2008-11-12 03:54:20 -0500
committerDavid S. Miller <davem@davemloft.net>2008-11-12 03:54:20 -0500
commit7a9546ee354ec6f23af403992b8c07baa50a23d2 (patch)
tree8536e2f4aa79921c0b2666795cdff91be80a2057 /net/ipv4
parent8f424b5f32d78b4f353b3cddca9804808ef063eb (diff)
net: ib_net pointer should depend on CONFIG_NET_NS
We can shrink the size of "struct inet_bind_bucket" by 50% by using read_pnet() and write_pnet().
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/inet_hashtables.c6
2 files changed, 5 insertions, 5 deletions
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 36f4cbc7da3a..05af807ca9b9 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -109,7 +109,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
109 hashinfo->bhash_size)]; 109 hashinfo->bhash_size)];
110 spin_lock(&head->lock); 110 spin_lock(&head->lock);
111 inet_bind_bucket_for_each(tb, node, &head->chain) 111 inet_bind_bucket_for_each(tb, node, &head->chain)
112 if (tb->ib_net == net && tb->port == rover) 112 if (ib_net(tb) == net && tb->port == rover)
113 goto next; 113 goto next;
114 break; 114 break;
115 next: 115 next:
@@ -137,7 +137,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
137 hashinfo->bhash_size)]; 137 hashinfo->bhash_size)];
138 spin_lock(&head->lock); 138 spin_lock(&head->lock);
139 inet_bind_bucket_for_each(tb, node, &head->chain) 139 inet_bind_bucket_for_each(tb, node, &head->chain)
140 if (tb->ib_net == net && tb->port == snum) 140 if (ib_net(tb) == net && tb->port == snum)
141 goto tb_found; 141 goto tb_found;
142 } 142 }
143 tb = NULL; 143 tb = NULL;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 44981906fb91..be41ebbec4eb 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -35,7 +35,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
35 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); 35 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
36 36
37 if (tb != NULL) { 37 if (tb != NULL) {
38 tb->ib_net = hold_net(net); 38 write_pnet(&tb->ib_net, hold_net(net));
39 tb->port = snum; 39 tb->port = snum;
40 tb->fastreuse = 0; 40 tb->fastreuse = 0;
41 INIT_HLIST_HEAD(&tb->owners); 41 INIT_HLIST_HEAD(&tb->owners);
@@ -51,7 +51,7 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
51{ 51{
52 if (hlist_empty(&tb->owners)) { 52 if (hlist_empty(&tb->owners)) {
53 __hlist_del(&tb->node); 53 __hlist_del(&tb->node);
54 release_net(tb->ib_net); 54 release_net(ib_net(tb));
55 kmem_cache_free(cachep, tb); 55 kmem_cache_free(cachep, tb);
56 } 56 }
57} 57}
@@ -449,7 +449,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
449 * unique enough. 449 * unique enough.
450 */ 450 */
451 inet_bind_bucket_for_each(tb, node, &head->chain) { 451 inet_bind_bucket_for_each(tb, node, &head->chain) {
452 if (tb->ib_net == net && tb->port == port) { 452 if (ib_net(tb) == net && tb->port == port) {
453 WARN_ON(hlist_empty(&tb->owners)); 453 WARN_ON(hlist_empty(&tb->owners));
454 if (tb->fastreuse >= 0) 454 if (tb->fastreuse >= 0)
455 goto next_port; 455 goto next_port;