author		Gao feng <gaofeng@cn.fujitsu.com>	2012-06-07 21:20:41 -0400
committer	David S. Miller <davem@davemloft.net>	2012-06-08 17:27:23 -0400
commit		c8a627ed06d6d49bf65015a2185c519335c4c83f (patch)
tree		2a67acd810661f3655162868bef0306e7ec51b86 /net/ipv4/inetpeer.c
parent		1578e7778fa04eb7e32da561effee6cd38139b0f (diff)
inetpeer: add namespace support for inetpeer
Currently inetpeer does not support network namespaces, so peer information leaks across namespaces. This patch moves the global variables v4_peers and v6_peers into netns_ipv4 and netns_ipv6 as a 'peers' field, adds a struct pernet_operations (inetpeer_ops) to initialize the per-net inetpeer data, and changes family_to_base() and inet_getpeer() to be namespace-aware.

Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
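For context on the API change, a minimal caller-side sketch follows. example_update_peer() is a hypothetical function for illustration only and is not part of this patch; inet_getpeer() and inet_putpeer() are the real interfaces, and the netns would typically be obtained via dev_net(dev) or sock_net(sk):

/* Hypothetical caller, for illustration only (not from this patch). */
static void example_update_peer(struct net *net,
				const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer;

	/* Before this patch, inet_getpeer(daddr, 1) consulted the global
	 * v4_peers/v6_peers trees shared by every namespace; it now walks
	 * the tree owned by the given netns.
	 */
	peer = inet_getpeer(net, daddr, 1);	/* 1 = create if absent */
	if (!peer)
		return;

	/* ... read or update per-namespace peer state ... */

	inet_putpeer(peer);
}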
Diffstat (limited to 'net/ipv4/inetpeer.c')
-rw-r--r--	net/ipv4/inetpeer.c	68
1 file changed, 50 insertions, 18 deletions
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dfba343b2509..1c8527349c86 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -88,18 +88,6 @@ struct inet_peer_base {
 	int		total;
 };
 
-static struct inet_peer_base v4_peers = {
-	.root		= peer_avl_empty_rcu,
-	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
-	.total		= 0,
-};
-
-static struct inet_peer_base v6_peers = {
-	.root		= peer_avl_empty_rcu,
-	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
-	.total		= 0,
-};
-
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
 /* Exported for sysctl_net_ipv4. */
@@ -153,6 +141,46 @@ static void inetpeer_gc_worker(struct work_struct *work)
 	schedule_delayed_work(&gc_work, gc_delay);
 }
 
+static int __net_init inetpeer_net_init(struct net *net)
+{
+	net->ipv4.peers = kzalloc(sizeof(struct inet_peer_base),
+				  GFP_KERNEL);
+	if (net->ipv4.peers == NULL)
+		return -ENOMEM;
+
+	net->ipv4.peers->root = peer_avl_empty_rcu;
+	seqlock_init(&net->ipv4.peers->lock);
+
+	net->ipv6.peers = kzalloc(sizeof(struct inet_peer_base),
+				  GFP_KERNEL);
+	if (net->ipv6.peers == NULL)
+		goto out_ipv6;
+
+	net->ipv6.peers->root = peer_avl_empty_rcu;
+	seqlock_init(&net->ipv6.peers->lock);
+
+	return 0;
+out_ipv6:
+	kfree(net->ipv4.peers);
+	return -ENOMEM;
+}
+
+static void __net_exit inetpeer_net_exit(struct net *net)
+{
+	inetpeer_invalidate_tree(net, AF_INET);
+	kfree(net->ipv4.peers);
+	net->ipv4.peers = NULL;
+
+	inetpeer_invalidate_tree(net, AF_INET6);
+	kfree(net->ipv6.peers);
+	net->ipv6.peers = NULL;
+}
+
+static struct pernet_operations inetpeer_ops = {
+	.init	= inetpeer_net_init,
+	.exit	= inetpeer_net_exit,
+};
+
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
 {
@@ -177,6 +205,7 @@ void __init inet_initpeers(void)
 			NULL);
 
 	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+	register_pernet_subsys(&inetpeer_ops);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -401,9 +430,10 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
 	call_rcu(&p->rcu, inetpeer_free_rcu);
 }
 
-static struct inet_peer_base *family_to_base(int family)
+static struct inet_peer_base *family_to_base(struct net *net,
+					     int family)
 {
-	return family == AF_INET ? &v4_peers : &v6_peers;
+	return family == AF_INET ? net->ipv4.peers : net->ipv6.peers;
 }
 
 /* perform garbage collect on all items stacked during a lookup */
@@ -443,10 +473,12 @@ static int inet_peer_gc(struct inet_peer_base *base,
 	return cnt;
 }
 
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct net *net,
+			       const struct inetpeer_addr *daddr,
+			       int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer_base *base = family_to_base(daddr->family);
+	struct inet_peer_base *base = family_to_base(net, daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
 	int invalidated, gccnt = 0;
@@ -571,10 +603,10 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
 	schedule_delayed_work(&gc_work, gc_delay);
 }
 
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct net *net, int family)
 {
 	struct inet_peer *old, *new, *prev;
-	struct inet_peer_base *base = family_to_base(family);
+	struct inet_peer_base *base = family_to_base(net, family);
 
 	write_seqlock_bh(&base->lock);
 