diff options
author | Pavel Emelyanov <xemul@openvz.org> | 2008-01-31 08:05:50 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-01-31 22:28:18 -0500 |
commit | 941b1d22cc035ad58b3d9b44a1c74efac2d7e499 (patch) | |
tree | 9b50211c27a209c02d97b73f74bf33cc87f4c9c3 /net/ipv4 | |
parent | 5ee31fc1ecdcbc234c8c56dcacef87c8e09909d8 (diff) |
[NETNS]: Make bind buckets live in net namespaces.
This tags the inet_bind_bucket struct with a net pointer,
initializes it during creation and filters on it
during lookup.
A better hash function that takes the net into account is to
be added in the future, but currently all bind buckets
with the same port will be in one hash chain.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/inet_connection_sock.c | 8 | ||||
-rw-r--r-- | net/ipv4/inet_hashtables.c | 8 |
2 files changed, 11 insertions, 5 deletions
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7801cceb2d1b..de5a41de191a 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -87,6 +87,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo, | |||
87 | struct hlist_node *node; | 87 | struct hlist_node *node; |
88 | struct inet_bind_bucket *tb; | 88 | struct inet_bind_bucket *tb; |
89 | int ret; | 89 | int ret; |
90 | struct net *net = sk->sk_net; | ||
90 | 91 | ||
91 | local_bh_disable(); | 92 | local_bh_disable(); |
92 | if (!snum) { | 93 | if (!snum) { |
@@ -100,7 +101,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo, | |||
100 | head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; | 101 | head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; |
101 | spin_lock(&head->lock); | 102 | spin_lock(&head->lock); |
102 | inet_bind_bucket_for_each(tb, node, &head->chain) | 103 | inet_bind_bucket_for_each(tb, node, &head->chain) |
103 | if (tb->port == rover) | 104 | if (tb->ib_net == net && tb->port == rover) |
104 | goto next; | 105 | goto next; |
105 | break; | 106 | break; |
106 | next: | 107 | next: |
@@ -127,7 +128,7 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo, | |||
127 | head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; | 128 | head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; |
128 | spin_lock(&head->lock); | 129 | spin_lock(&head->lock); |
129 | inet_bind_bucket_for_each(tb, node, &head->chain) | 130 | inet_bind_bucket_for_each(tb, node, &head->chain) |
130 | if (tb->port == snum) | 131 | if (tb->ib_net == net && tb->port == snum) |
131 | goto tb_found; | 132 | goto tb_found; |
132 | } | 133 | } |
133 | tb = NULL; | 134 | tb = NULL; |
@@ -147,7 +148,8 @@ tb_found: | |||
147 | } | 148 | } |
148 | tb_not_found: | 149 | tb_not_found: |
149 | ret = 1; | 150 | ret = 1; |
150 | if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL) | 151 | if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, |
152 | net, head, snum)) == NULL) | ||
151 | goto fail_unlock; | 153 | goto fail_unlock; |
152 | if (hlist_empty(&tb->owners)) { | 154 | if (hlist_empty(&tb->owners)) { |
153 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 155 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index b93d40ff6ef4..db1e53a865c2 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -28,12 +28,14 @@ | |||
28 | * The bindhash mutex for snum's hash chain must be held here. | 28 | * The bindhash mutex for snum's hash chain must be held here. |
29 | */ | 29 | */ |
30 | struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, | 30 | struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, |
31 | struct net *net, | ||
31 | struct inet_bind_hashbucket *head, | 32 | struct inet_bind_hashbucket *head, |
32 | const unsigned short snum) | 33 | const unsigned short snum) |
33 | { | 34 | { |
34 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); | 35 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); |
35 | 36 | ||
36 | if (tb != NULL) { | 37 | if (tb != NULL) { |
38 | tb->ib_net = net; | ||
37 | tb->port = snum; | 39 | tb->port = snum; |
38 | tb->fastreuse = 0; | 40 | tb->fastreuse = 0; |
39 | INIT_HLIST_HEAD(&tb->owners); | 41 | INIT_HLIST_HEAD(&tb->owners); |
@@ -359,6 +361,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
359 | struct inet_bind_hashbucket *head; | 361 | struct inet_bind_hashbucket *head; |
360 | struct inet_bind_bucket *tb; | 362 | struct inet_bind_bucket *tb; |
361 | int ret; | 363 | int ret; |
364 | struct net *net = sk->sk_net; | ||
362 | 365 | ||
363 | if (!snum) { | 366 | if (!snum) { |
364 | int i, remaining, low, high, port; | 367 | int i, remaining, low, high, port; |
@@ -381,7 +384,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
381 | * unique enough. | 384 | * unique enough. |
382 | */ | 385 | */ |
383 | inet_bind_bucket_for_each(tb, node, &head->chain) { | 386 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
384 | if (tb->port == port) { | 387 | if (tb->ib_net == net && tb->port == port) { |
385 | BUG_TRAP(!hlist_empty(&tb->owners)); | 388 | BUG_TRAP(!hlist_empty(&tb->owners)); |
386 | if (tb->fastreuse >= 0) | 389 | if (tb->fastreuse >= 0) |
387 | goto next_port; | 390 | goto next_port; |
@@ -392,7 +395,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
392 | } | 395 | } |
393 | } | 396 | } |
394 | 397 | ||
395 | tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port); | 398 | tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, |
399 | net, head, port); | ||
396 | if (!tb) { | 400 | if (!tb) { |
397 | spin_unlock(&head->lock); | 401 | spin_unlock(&head->lock); |
398 | break; | 402 | break; |