author     Eric Dumazet <dada1@cosmosbay.com>      2008-11-12 03:54:20 -0500
committer  David S. Miller <davem@davemloft.net>   2008-11-12 03:54:20 -0500
commit     7a9546ee354ec6f23af403992b8c07baa50a23d2 (patch)
tree       8536e2f4aa79921c0b2666795cdff91be80a2057 /net/ipv4/inet_hashtables.c
parent     8f424b5f32d78b4f353b3cddca9804808ef063eb (diff)
net: ib_net pointer should depends on CONFIG_NET_NS
We can shrink the size of "struct inet_bind_bucket" by 50% by using
read_pnet() and write_pnet().
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
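The saving comes from making the ib_net pointer conditional on CONFIG_NET_NS and funnelling every access through read_pnet()/write_pnet(). As a rough sketch of the idea (modelled on the helpers this patch adds to include/net/net_namespace.h; the in-tree definitions are authoritative):

#ifdef CONFIG_NET_NS
/* Namespaces enabled: the stored pointer is real, the helpers are trivial. */
static inline void write_pnet(struct net **pnet, struct net *net)
{
	*pnet = net;
}

static inline struct net *read_pnet(struct net * const *pnet)
{
	return *pnet;
}
#else
/*
 * Namespaces disabled: only init_net exists.  These are macros rather
 * than inline functions, so their pointer argument is never evaluated
 * and the ib_net member can be compiled out of the structure.
 */
#define write_pnet(pnet, net)	do { (void)(net); } while (0)
#define read_pnet(pnet)		(&init_net)
#endif

With CONFIG_NET_NS disabled, every read collapses to &init_net and the per-bucket pointer disappears, which is where the size reduction comes from.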
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--  net/ipv4/inet_hashtables.c | 6
1 file changed, 3 insertions, 3 deletions
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 44981906fb91..be41ebbec4eb 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -35,7 +35,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
 	if (tb != NULL) {
-		tb->ib_net = hold_net(net);
+		write_pnet(&tb->ib_net, hold_net(net));
 		tb->port = snum;
 		tb->fastreuse = 0;
 		INIT_HLIST_HEAD(&tb->owners);
@@ -51,7 +51,7 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
 {
 	if (hlist_empty(&tb->owners)) {
 		__hlist_del(&tb->node);
-		release_net(tb->ib_net);
+		release_net(ib_net(tb));
 		kmem_cache_free(cachep, tb);
 	}
 }
@@ -449,7 +449,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		 * unique enough.
 		 */
 		inet_bind_bucket_for_each(tb, node, &head->chain) {
-			if (tb->ib_net == net && tb->port == port) {
+			if (ib_net(tb) == net && tb->port == port) {
 				WARN_ON(hlist_empty(&tb->owners));
 				if (tb->fastreuse >= 0)
 					goto next_port;
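The ib_net(tb) accessor used in the new hunks is not defined in this file; it lives in include/net/inet_hashtables.h alongside the bucket definition. A sketch of how it pairs with the now-conditional member (assuming the struct layout this patch targets):

struct inet_bind_bucket {
#ifdef CONFIG_NET_NS
	struct net		*ib_net;	/* present only with net namespaces */
#endif
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	/*
	 * Builds even without CONFIG_NET_NS: read_pnet() is then a macro
	 * that ignores its argument and yields &init_net, so the missing
	 * ib_net member is never referenced by the compiler.
	 */
	return read_pnet(&ib->ib_net);
}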