diff options
-rw-r--r-- | include/net/netfilter/nf_conntrack_ecache.h | 22 | ||||
-rw-r--r-- | include/net/netns/conntrack.h | 5 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_core.c | 12 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_ecache.c | 26 |
4 files changed, 49 insertions(+), 16 deletions(-)
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index c1b406cecf9b..35f814c1e2ca 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/notifier.h> | 9 | #include <linux/notifier.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <net/net_namespace.h> | ||
11 | #include <net/netfilter/nf_conntrack_expect.h> | 12 | #include <net/netfilter/nf_conntrack_expect.h> |
12 | 13 | ||
13 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 14 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
@@ -15,9 +16,6 @@ struct nf_conntrack_ecache { | |||
15 | struct nf_conn *ct; | 16 | struct nf_conn *ct; |
16 | unsigned int events; | 17 | unsigned int events; |
17 | }; | 18 | }; |
18 | DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); | ||
19 | |||
20 | #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) | ||
21 | 19 | ||
22 | extern struct atomic_notifier_head nf_conntrack_chain; | 20 | extern struct atomic_notifier_head nf_conntrack_chain; |
23 | extern int nf_conntrack_register_notifier(struct notifier_block *nb); | 21 | extern int nf_conntrack_register_notifier(struct notifier_block *nb); |
@@ -25,15 +23,16 @@ extern int nf_conntrack_unregister_notifier(struct notifier_block *nb); | |||
25 | 23 | ||
26 | extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); | 24 | extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); |
27 | extern void __nf_ct_event_cache_init(struct nf_conn *ct); | 25 | extern void __nf_ct_event_cache_init(struct nf_conn *ct); |
28 | extern void nf_ct_event_cache_flush(void); | 26 | extern void nf_ct_event_cache_flush(struct net *net); |
29 | 27 | ||
30 | static inline void | 28 | static inline void |
31 | nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) | 29 | nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) |
32 | { | 30 | { |
31 | struct net *net = nf_ct_net(ct); | ||
33 | struct nf_conntrack_ecache *ecache; | 32 | struct nf_conntrack_ecache *ecache; |
34 | 33 | ||
35 | local_bh_disable(); | 34 | local_bh_disable(); |
36 | ecache = &__get_cpu_var(nf_conntrack_ecache); | 35 | ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id()); |
37 | if (ct != ecache->ct) | 36 | if (ct != ecache->ct) |
38 | __nf_ct_event_cache_init(ct); | 37 | __nf_ct_event_cache_init(ct); |
39 | ecache->events |= event; | 38 | ecache->events |= event; |
@@ -58,6 +57,9 @@ nf_ct_expect_event(enum ip_conntrack_expect_events event, | |||
58 | atomic_notifier_call_chain(&nf_ct_expect_chain, event, exp); | 57 | atomic_notifier_call_chain(&nf_ct_expect_chain, event, exp); |
59 | } | 58 | } |
60 | 59 | ||
60 | extern int nf_conntrack_ecache_init(struct net *net); | ||
61 | extern void nf_conntrack_ecache_fini(struct net *net); | ||
62 | |||
61 | #else /* CONFIG_NF_CONNTRACK_EVENTS */ | 63 | #else /* CONFIG_NF_CONNTRACK_EVENTS */ |
62 | 64 | ||
63 | static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, | 65 | static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, |
@@ -67,7 +69,15 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event, | |||
67 | static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} | 69 | static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} |
68 | static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, | 70 | static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event, |
69 | struct nf_conntrack_expect *exp) {} | 71 | struct nf_conntrack_expect *exp) {} |
70 | static inline void nf_ct_event_cache_flush(void) {} | 72 | static inline void nf_ct_event_cache_flush(struct net *net) {} |
73 | |||
74 | static inline int nf_conntrack_ecache_init(struct net *net) | ||
75 | { | ||
76 | return 0; | ||
77 | } | ||
78 | static inline void nf_conntrack_ecache_fini(struct net *net) | ||
79 | { | ||
80 | } | ||
71 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ | 81 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ |
72 | 82 | ||
73 | #endif /*_NF_CONNTRACK_ECACHE_H*/ | 83 | #endif /*_NF_CONNTRACK_ECACHE_H*/ |
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 6ddf58e142a9..9d5c1623c51f 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h | |||
@@ -4,12 +4,17 @@ | |||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
6 | 6 | ||
7 | struct nf_conntrack_ecache; | ||
8 | |||
7 | struct netns_ct { | 9 | struct netns_ct { |
8 | atomic_t count; | 10 | atomic_t count; |
9 | unsigned int expect_count; | 11 | unsigned int expect_count; |
10 | struct hlist_head *hash; | 12 | struct hlist_head *hash; |
11 | struct hlist_head *expect_hash; | 13 | struct hlist_head *expect_hash; |
12 | struct hlist_head unconfirmed; | 14 | struct hlist_head unconfirmed; |
15 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
16 | struct nf_conntrack_ecache *ecache; | ||
17 | #endif | ||
13 | int hash_vmalloc; | 18 | int hash_vmalloc; |
14 | int expect_vmalloc; | 19 | int expect_vmalloc; |
15 | }; | 20 | }; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 01f59c57730a..b55944e5e4e2 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -1023,7 +1023,8 @@ void nf_conntrack_cleanup(struct net *net) | |||
1023 | delete... */ | 1023 | delete... */ |
1024 | synchronize_net(); | 1024 | synchronize_net(); |
1025 | 1025 | ||
1026 | nf_ct_event_cache_flush(); | 1026 | nf_ct_event_cache_flush(net); |
1027 | nf_conntrack_ecache_fini(net); | ||
1027 | i_see_dead_people: | 1028 | i_see_dead_people: |
1028 | nf_conntrack_flush(net); | 1029 | nf_conntrack_flush(net); |
1029 | if (atomic_read(&net->ct.count) != 0) { | 1030 | if (atomic_read(&net->ct.count) != 0) { |
@@ -1151,11 +1152,14 @@ int nf_conntrack_init(struct net *net) | |||
1151 | max_factor = 4; | 1152 | max_factor = 4; |
1152 | } | 1153 | } |
1153 | atomic_set(&net->ct.count, 0); | 1154 | atomic_set(&net->ct.count, 0); |
1155 | ret = nf_conntrack_ecache_init(net); | ||
1156 | if (ret < 0) | ||
1157 | goto err_ecache; | ||
1154 | net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, | 1158 | net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, |
1155 | &net->ct.hash_vmalloc); | 1159 | &net->ct.hash_vmalloc); |
1156 | if (!net->ct.hash) { | 1160 | if (!net->ct.hash) { |
1157 | printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); | 1161 | printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); |
1158 | goto err_out; | 1162 | goto err_hash; |
1159 | } | 1163 | } |
1160 | INIT_HLIST_HEAD(&net->ct.unconfirmed); | 1164 | INIT_HLIST_HEAD(&net->ct.unconfirmed); |
1161 | 1165 | ||
@@ -1215,6 +1219,8 @@ err_free_conntrack_slab: | |||
1215 | err_free_hash: | 1219 | err_free_hash: |
1216 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, | 1220 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, |
1217 | nf_conntrack_htable_size); | 1221 | nf_conntrack_htable_size); |
1218 | err_out: | 1222 | err_hash: |
1223 | nf_conntrack_ecache_fini(net); | ||
1224 | err_ecache: | ||
1219 | return -ENOMEM; | 1225 | return -ENOMEM; |
1220 | } | 1226 | } |
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 83c41ac3505b..a5f5e2e65d13 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -29,9 +29,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_chain); | |||
29 | ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain); | 29 | ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain); |
30 | EXPORT_SYMBOL_GPL(nf_ct_expect_chain); | 30 | EXPORT_SYMBOL_GPL(nf_ct_expect_chain); |
31 | 31 | ||
32 | DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); | ||
33 | EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache); | ||
34 | |||
35 | /* deliver cached events and clear cache entry - must be called with locally | 32 | /* deliver cached events and clear cache entry - must be called with locally |
36 | * disabled softirqs */ | 33 | * disabled softirqs */ |
37 | static inline void | 34 | static inline void |
@@ -51,10 +48,11 @@ __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache) | |||
51 | * by code prior to async packet handling for freeing the skb */ | 48 | * by code prior to async packet handling for freeing the skb */ |
52 | void nf_ct_deliver_cached_events(const struct nf_conn *ct) | 49 | void nf_ct_deliver_cached_events(const struct nf_conn *ct) |
53 | { | 50 | { |
51 | struct net *net = nf_ct_net(ct); | ||
54 | struct nf_conntrack_ecache *ecache; | 52 | struct nf_conntrack_ecache *ecache; |
55 | 53 | ||
56 | local_bh_disable(); | 54 | local_bh_disable(); |
57 | ecache = &__get_cpu_var(nf_conntrack_ecache); | 55 | ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id()); |
58 | if (ecache->ct == ct) | 56 | if (ecache->ct == ct) |
59 | __nf_ct_deliver_cached_events(ecache); | 57 | __nf_ct_deliver_cached_events(ecache); |
60 | local_bh_enable(); | 58 | local_bh_enable(); |
@@ -64,10 +62,11 @@ EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); | |||
64 | /* Deliver cached events for old pending events, if current conntrack != old */ | 62 | /* Deliver cached events for old pending events, if current conntrack != old */ |
65 | void __nf_ct_event_cache_init(struct nf_conn *ct) | 63 | void __nf_ct_event_cache_init(struct nf_conn *ct) |
66 | { | 64 | { |
65 | struct net *net = nf_ct_net(ct); | ||
67 | struct nf_conntrack_ecache *ecache; | 66 | struct nf_conntrack_ecache *ecache; |
68 | 67 | ||
69 | /* take care of delivering potentially old events */ | 68 | /* take care of delivering potentially old events */ |
70 | ecache = &__get_cpu_var(nf_conntrack_ecache); | 69 | ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id()); |
71 | BUG_ON(ecache->ct == ct); | 70 | BUG_ON(ecache->ct == ct); |
72 | if (ecache->ct) | 71 | if (ecache->ct) |
73 | __nf_ct_deliver_cached_events(ecache); | 72 | __nf_ct_deliver_cached_events(ecache); |
@@ -79,18 +78,31 @@ EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init); | |||
79 | 78 | ||
80 | /* flush the event cache - touches other CPU's data and must not be called | 79 | /* flush the event cache - touches other CPU's data and must not be called |
81 | * while packets are still passing through the code */ | 80 | * while packets are still passing through the code */ |
82 | void nf_ct_event_cache_flush(void) | 81 | void nf_ct_event_cache_flush(struct net *net) |
83 | { | 82 | { |
84 | struct nf_conntrack_ecache *ecache; | 83 | struct nf_conntrack_ecache *ecache; |
85 | int cpu; | 84 | int cpu; |
86 | 85 | ||
87 | for_each_possible_cpu(cpu) { | 86 | for_each_possible_cpu(cpu) { |
88 | ecache = &per_cpu(nf_conntrack_ecache, cpu); | 87 | ecache = per_cpu_ptr(net->ct.ecache, cpu); |
89 | if (ecache->ct) | 88 | if (ecache->ct) |
90 | nf_ct_put(ecache->ct); | 89 | nf_ct_put(ecache->ct); |
91 | } | 90 | } |
92 | } | 91 | } |
93 | 92 | ||
93 | int nf_conntrack_ecache_init(struct net *net) | ||
94 | { | ||
95 | net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache); | ||
96 | if (!net->ct.ecache) | ||
97 | return -ENOMEM; | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | void nf_conntrack_ecache_fini(struct net *net) | ||
102 | { | ||
103 | free_percpu(net->ct.ecache); | ||
104 | } | ||
105 | |||
94 | int nf_conntrack_register_notifier(struct notifier_block *nb) | 106 | int nf_conntrack_register_notifier(struct notifier_block *nb) |
95 | { | 107 | { |
96 | return atomic_notifier_chain_register(&nf_conntrack_chain, nb); | 108 | return atomic_notifier_chain_register(&nf_conntrack_chain, nb); |