about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorAlexey Dobriyan <adobriyan@gmail.com>2008-10-08 05:35:07 -0400
committerPatrick McHardy <kaber@trash.net>2008-10-08 05:35:07 -0400
commit6058fa6bb96a5b6145cba10c5171f09c2783ca69 (patch)
tree634c499aa2ce0a76133d5543eaebcdc73a58f4da /net
parenta71996fccce4b2086a26036aa3c915365ca36926 (diff)
netfilter: netns nf_conntrack: per-netns event cache
Heh, last minute proof-reading of this patch made me think, that this is actually unneeded, simply because "ct" pointers will be different for different conntracks in different netns, just like they are different in one netns. Not so sure anymore.

[Patrick: pointers will be different, flushing can only be done while inactive though and thus it needs to be per netns]

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net')
-rw-r--r--net/netfilter/nf_conntrack_core.c12
-rw-r--r--net/netfilter/nf_conntrack_ecache.c26
2 files changed, 28 insertions, 10 deletions
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 01f59c57730..b55944e5e4e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1023,7 +1023,8 @@ void nf_conntrack_cleanup(struct net *net)
1023 delete... */ 1023 delete... */
1024 synchronize_net(); 1024 synchronize_net();
1025 1025
1026 nf_ct_event_cache_flush(); 1026 nf_ct_event_cache_flush(net);
1027 nf_conntrack_ecache_fini(net);
1027 i_see_dead_people: 1028 i_see_dead_people:
1028 nf_conntrack_flush(net); 1029 nf_conntrack_flush(net);
1029 if (atomic_read(&net->ct.count) != 0) { 1030 if (atomic_read(&net->ct.count) != 0) {
@@ -1151,11 +1152,14 @@ int nf_conntrack_init(struct net *net)
1151 max_factor = 4; 1152 max_factor = 4;
1152 } 1153 }
1153 atomic_set(&net->ct.count, 0); 1154 atomic_set(&net->ct.count, 0);
1155 ret = nf_conntrack_ecache_init(net);
1156 if (ret < 0)
1157 goto err_ecache;
1154 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1158 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
1155 &net->ct.hash_vmalloc); 1159 &net->ct.hash_vmalloc);
1156 if (!net->ct.hash) { 1160 if (!net->ct.hash) {
1157 printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); 1161 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1158 goto err_out; 1162 goto err_hash;
1159 } 1163 }
1160 INIT_HLIST_HEAD(&net->ct.unconfirmed); 1164 INIT_HLIST_HEAD(&net->ct.unconfirmed);
1161 1165
@@ -1215,6 +1219,8 @@ err_free_conntrack_slab:
1215err_free_hash: 1219err_free_hash:
1216 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1220 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1217 nf_conntrack_htable_size); 1221 nf_conntrack_htable_size);
1218err_out: 1222err_hash:
1223 nf_conntrack_ecache_fini(net);
1224err_ecache:
1219 return -ENOMEM; 1225 return -ENOMEM;
1220} 1226}
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 83c41ac3505..a5f5e2e65d1 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -29,9 +29,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_chain);
29ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain); 29ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain);
30EXPORT_SYMBOL_GPL(nf_ct_expect_chain); 30EXPORT_SYMBOL_GPL(nf_ct_expect_chain);
31 31
32DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
33EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
34
35/* deliver cached events and clear cache entry - must be called with locally 32/* deliver cached events and clear cache entry - must be called with locally
36 * disabled softirqs */ 33 * disabled softirqs */
37static inline void 34static inline void
@@ -51,10 +48,11 @@ __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
51 * by code prior to async packet handling for freeing the skb */ 48 * by code prior to async packet handling for freeing the skb */
52void nf_ct_deliver_cached_events(const struct nf_conn *ct) 49void nf_ct_deliver_cached_events(const struct nf_conn *ct)
53{ 50{
51 struct net *net = nf_ct_net(ct);
54 struct nf_conntrack_ecache *ecache; 52 struct nf_conntrack_ecache *ecache;
55 53
56 local_bh_disable(); 54 local_bh_disable();
57 ecache = &__get_cpu_var(nf_conntrack_ecache); 55 ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
58 if (ecache->ct == ct) 56 if (ecache->ct == ct)
59 __nf_ct_deliver_cached_events(ecache); 57 __nf_ct_deliver_cached_events(ecache);
60 local_bh_enable(); 58 local_bh_enable();
@@ -64,10 +62,11 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
64/* Deliver cached events for old pending events, if current conntrack != old */ 62/* Deliver cached events for old pending events, if current conntrack != old */
65void __nf_ct_event_cache_init(struct nf_conn *ct) 63void __nf_ct_event_cache_init(struct nf_conn *ct)
66{ 64{
65 struct net *net = nf_ct_net(ct);
67 struct nf_conntrack_ecache *ecache; 66 struct nf_conntrack_ecache *ecache;
68 67
69 /* take care of delivering potentially old events */ 68 /* take care of delivering potentially old events */
70 ecache = &__get_cpu_var(nf_conntrack_ecache); 69 ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
71 BUG_ON(ecache->ct == ct); 70 BUG_ON(ecache->ct == ct);
72 if (ecache->ct) 71 if (ecache->ct)
73 __nf_ct_deliver_cached_events(ecache); 72 __nf_ct_deliver_cached_events(ecache);
@@ -79,18 +78,31 @@ EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
79 78
80/* flush the event cache - touches other CPU's data and must not be called 79/* flush the event cache - touches other CPU's data and must not be called
81 * while packets are still passing through the code */ 80 * while packets are still passing through the code */
82void nf_ct_event_cache_flush(void) 81void nf_ct_event_cache_flush(struct net *net)
83{ 82{
84 struct nf_conntrack_ecache *ecache; 83 struct nf_conntrack_ecache *ecache;
85 int cpu; 84 int cpu;
86 85
87 for_each_possible_cpu(cpu) { 86 for_each_possible_cpu(cpu) {
88 ecache = &per_cpu(nf_conntrack_ecache, cpu); 87 ecache = per_cpu_ptr(net->ct.ecache, cpu);
89 if (ecache->ct) 88 if (ecache->ct)
90 nf_ct_put(ecache->ct); 89 nf_ct_put(ecache->ct);
91 } 90 }
92} 91}
93 92
93int nf_conntrack_ecache_init(struct net *net)
94{
95 net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache);
96 if (!net->ct.ecache)
97 return -ENOMEM;
98 return 0;
99}
100
101void nf_conntrack_ecache_fini(struct net *net)
102{
103 free_percpu(net->ct.ecache);
104}
105
94int nf_conntrack_register_notifier(struct notifier_block *nb) 106int nf_conntrack_register_notifier(struct notifier_block *nb)
95{ 107{
96 return atomic_notifier_chain_register(&nf_conntrack_chain, nb); 108 return atomic_notifier_chain_register(&nf_conntrack_chain, nb);