| author | Harald Welte <laforge@netfilter.org> | 2005-08-09 22:28:03 -0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:31:24 -0400 |
| commit | ac3247baf8ecadf168642e3898b0212c29c79715 (patch) | |
| tree | f2b1c65f34c035491d921006efcf8b2e7a707785 /net | |
| parent | abc3bc58047efa72ee9c2e208cbeb73d261ad703 (diff) | |
[NETFILTER]: connection tracking event notifiers
This adds a notifier chain based event mechanism for ip_conntrack state
changes. As opposed to the previous implementations in patch-o-matic, we
no longer need a field in the skb to achieve this.
Thanks to Patrick McHardy and Rusty for their valuable input on the idea
of a per_cpu implementation.
Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
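
For context, here is a minimal sketch of how a module could consume the new notifier chain. This is not part of the patch: the callback signature is just the standard notifier_block convention, and it assumes that the ip_conntrack_register_notifier()/ip_conntrack_unregister_notifier() helpers exported below take a plain struct notifier_block *, and that the IPCT_* constants from the ip_conntrack headers are OR-able bit flags, as the per-CPU event cache in ip_conntrack_core.c suggests.

```c
/* Hedged sketch of a consumer module -- not part of this patch.
 * Assumes the registration helpers exported by this patch accept a
 * standard struct notifier_block *, and that IPCT_* are bit flags. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>

static int ctevent_cb(struct notifier_block *nb, unsigned long events,
		      void *ptr)
{
	struct ip_conntrack *ct = ptr;

	/* Several cached events may be delivered in a single call. */
	if (events & IPCT_NEW)
		printk(KERN_DEBUG "conntrack %p: new\n", ct);
	if (events & IPCT_DESTROY)
		printk(KERN_DEBUG "conntrack %p: destroyed\n", ct);

	return NOTIFY_DONE;
}

static struct notifier_block ctevent_nb = {
	.notifier_call = ctevent_cb,
};

static int __init ctevent_init(void)
{
	return ip_conntrack_register_notifier(&ctevent_nb);
}

static void __exit ctevent_exit(void)
{
	ip_conntrack_unregister_notifier(&ctevent_nb);
}

module_init(ctevent_init);
module_exit(ctevent_exit);
MODULE_LICENSE("GPL");
```

Because events are cached per CPU and flushed in one notifier_call_chain() run, a callback sees the accumulated event bitmask for a conntrack rather than one call per state change.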
Diffstat (limited to 'net')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | net/ipv4/netfilter/Kconfig | 10 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_core.c | 122 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_ftp.c | 12 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_proto_icmp.c | 1 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 2 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 4 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_proto_udp.c | 3 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_standalone.c | 10 |
8 files changed, 154 insertions, 10 deletions
```diff
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 46d4cb1c06f0..ff3393eba924 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -40,6 +40,16 @@ config IP_NF_CONNTRACK_MARK
 	  of packets, but this mark value is kept in the conntrack session
 	  instead of the individual packets.
 
+config IP_NF_CONNTRACK_EVENTS
+	bool "Connection tracking events"
+	depends on IP_NF_CONNTRACK
+	help
+	  If this option is enabled, the connection tracking code will
+	  provide a notifier chain that can be used by other kernel code
+	  to get notified about changes in the connection tracking state.
+
+	  IF unsure, say `N'.
+
 config IP_NF_CT_PROTO_SCTP
 	tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)'
 	depends on IP_NF_CONNTRACK && EXPERIMENTAL
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 04c3414361d4..caf89deae116 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -37,6 +37,7 @@
 #include <linux/err.h>
 #include <linux/percpu.h>
 #include <linux/moduleparam.h>
+#include <linux/notifier.h>
 
 /* ip_conntrack_lock protects the main hash table, protocol/helper/expected
    registrations, conntrack timers*/
@@ -49,7 +50,7 @@
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
 #include <linux/netfilter_ipv4/listhelp.h>
 
-#define IP_CONNTRACK_VERSION	"2.1"
+#define IP_CONNTRACK_VERSION	"2.2"
 
 #if 0
 #define DEBUGP printk
@@ -76,6 +77,81 @@ unsigned int ip_ct_log_invalid;
 static LIST_HEAD(unconfirmed);
 static int ip_conntrack_vmalloc;
 
+#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
+struct notifier_block *ip_conntrack_chain;
+struct notifier_block *ip_conntrack_expect_chain;
+
+DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
+
+static inline void __deliver_cached_events(struct ip_conntrack_ecache *ecache)
+{
+	if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
+		notifier_call_chain(&ip_conntrack_chain, ecache->events,
+				    ecache->ct);
+	ecache->events = 0;
+}
+
+void __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
+{
+	__deliver_cached_events(ecache);
+}
+
+/* Deliver all cached events for a particular conntrack. This is called
+ * by code prior to async packet handling or freeing the skb */
+void
+ip_conntrack_deliver_cached_events_for(const struct ip_conntrack *ct)
+{
+	struct ip_conntrack_ecache *ecache =
+			&__get_cpu_var(ip_conntrack_ecache);
+
+	if (!ct)
+		return;
+
+	if (ecache->ct == ct) {
+		DEBUGP("ecache: delivering event for %p\n", ct);
+		__deliver_cached_events(ecache);
+	} else {
+		if (net_ratelimit())
+			printk(KERN_WARNING "ecache: want to deliver for %p, "
+			       "but cache has %p\n", ct, ecache->ct);
+	}
+
+	/* signalize that events have already been delivered */
+	ecache->ct = NULL;
+}
+
+/* Deliver cached events for old pending events, if current conntrack != old */
+void ip_conntrack_event_cache_init(const struct sk_buff *skb)
+{
+	struct ip_conntrack *ct = (struct ip_conntrack *) skb->nfct;
+	struct ip_conntrack_ecache *ecache =
+			&__get_cpu_var(ip_conntrack_ecache);
+
+	/* take care of delivering potentially old events */
+	if (ecache->ct != ct) {
+		enum ip_conntrack_info ctinfo;
+		/* we have to check, since at startup the cache is NULL */
+		if (likely(ecache->ct)) {
+			DEBUGP("ecache: entered for different conntrack: "
+			       "ecache->ct=%p, skb->nfct=%p. delivering "
+			       "events\n", ecache->ct, ct);
+			__deliver_cached_events(ecache);
+			ip_conntrack_put(ecache->ct);
+		} else {
+			DEBUGP("ecache: entered for conntrack %p, "
+			       "cache was clean before\n", ct);
+		}
+
+		/* initialize for this conntrack/packet */
+		ecache->ct = ip_conntrack_get(skb, &ctinfo);
+		/* ecache->events cleared by __deliver_cached_devents() */
+	} else {
+		DEBUGP("ecache: re-entered for conntrack %p.\n", ct);
+	}
+}
+
+#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
+
 DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 
 void
@@ -223,6 +299,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
 	IP_NF_ASSERT(!timer_pending(&ct->timeout));
 
+	set_bit(IPS_DYING_BIT, &ct->status);
+
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to ip_conntrack_lock!!! -HW */
@@ -261,6 +339,7 @@ static void death_by_timeout(unsigned long ul_conntrack)
 {
 	struct ip_conntrack *ct = (void *)ul_conntrack;
 
+	ip_conntrack_event(IPCT_DESTROY, ct);
 	write_lock_bh(&ip_conntrack_lock);
 	/* Inside lock so preempt is disabled on module removal path.
 	 * Otherwise we can get spurious warnings. */
@@ -374,6 +453,16 @@ __ip_conntrack_confirm(struct sk_buff **pskb)
 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
 	CONNTRACK_STAT_INC(insert);
 	write_unlock_bh(&ip_conntrack_lock);
+	if (ct->helper)
+		ip_conntrack_event_cache(IPCT_HELPER, *pskb);
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
+	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
+		ip_conntrack_event_cache(IPCT_NATINFO, *pskb);
+#endif
+	ip_conntrack_event_cache(master_ct(ct) ?
+				 IPCT_RELATED : IPCT_NEW, *pskb);
+
 	return NF_ACCEPT;
 }
 
@@ -607,7 +696,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 	struct ip_conntrack *ct;
 	enum ip_conntrack_info ctinfo;
 	struct ip_conntrack_protocol *proto;
-	int set_reply;
+	int set_reply = 0;
 	int ret;
 
 	/* Previously seen (loopback or untracked)? Ignore. */
@@ -666,6 +755,8 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 
 	IP_NF_ASSERT((*pskb)->nfct);
 
+	ip_conntrack_event_cache_init(*pskb);
+
 	ret = proto->packet(ct, *pskb, ctinfo);
 	if (ret < 0) {
 		/* Invalid: inverse of the return code tells
@@ -676,8 +767,8 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 		return -ret;
 	}
 
-	if (set_reply)
-		set_bit(IPS_SEEN_REPLY_BIT, &ct->status);
+	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+		ip_conntrack_event_cache(IPCT_STATUS, *pskb);
 
 	return ret;
 }
@@ -824,6 +915,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
 		evict_oldest_expect(expect->master);
 
 	ip_conntrack_expect_insert(expect);
+	ip_conntrack_expect_event(IPEXP_NEW, expect);
 	ret = 0;
 out:
 	write_unlock_bh(&ip_conntrack_lock);
@@ -861,8 +953,10 @@ int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
 static inline int unhelp(struct ip_conntrack_tuple_hash *i,
 			 const struct ip_conntrack_helper *me)
 {
-	if (tuplehash_to_ctrack(i)->helper == me)
+	if (tuplehash_to_ctrack(i)->helper == me) {
+		ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
 		tuplehash_to_ctrack(i)->helper = NULL;
+	}
 	return 0;
 }
 
@@ -924,6 +1018,7 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 		if (del_timer(&ct->timeout)) {
 			ct->timeout.expires = jiffies + extra_jiffies;
 			add_timer(&ct->timeout);
+			ip_conntrack_event_cache(IPCT_REFRESH, skb);
 		}
 		ct_add_counters(ct, ctinfo, skb);
 		write_unlock_bh(&ip_conntrack_lock);
@@ -1012,6 +1107,23 @@ ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data)
 
 		ip_conntrack_put(ct);
 	}
+
+#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
+	{
+		/* we need to deliver all cached events in order to drop
+		 * the reference counts */
+		int cpu;
+		for_each_cpu(cpu) {
+			struct ip_conntrack_ecache *ecache =
+				&per_cpu(ip_conntrack_ecache, cpu);
+			if (ecache->ct) {
+				__ip_ct_deliver_cached_events(ecache);
+				ip_conntrack_put(ecache->ct);
+				ecache->ct = NULL;
+			}
+		}
+	}
+#endif
 }
 
 /* Fast function for those who don't want to parse /proc (and I don't
```
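
The header side of this change (the struct ip_conntrack_ecache definition and the ip_conntrack_event_cache()/ip_conntrack_event() helpers) lies outside the net/ diffstat shown here. As a hedged illustration only, based on the fields the hunks above actually touch (ecache->ct and ecache->events), the per-CPU cache and the caching step presumably look roughly like this; the struct layout and the helper name are assumptions:

```c
/* Illustration only -- the real declarations live in the netfilter
 * headers, which sit outside this net/-only diffstat.  The field names
 * 'ct' and 'events' match the usage in ip_conntrack_core.c above; the
 * helper below is a hypothetical stand-in for the real caching macro. */
#include <linux/percpu.h>
#include <linux/skbuff.h>

struct ip_conntrack;			/* opaque for this sketch */

struct ip_conntrack_ecache {
	struct ip_conntrack *ct;	/* conntrack the cached events belong to */
	unsigned int events;		/* pending IPCT_* event bits */
};

DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);

/* Cache one more event for the conntrack that event_cache_init() bound
 * to this CPU's cache; everything is delivered later in a single
 * notifier_call_chain() run by __deliver_cached_events(). */
static inline void example_event_cache(unsigned int event,
				       const struct sk_buff *skb)
{
	struct ip_conntrack_ecache *ecache =
			&__get_cpu_var(ip_conntrack_ecache);

	if (ecache->ct == (struct ip_conntrack *)skb->nfct)
		ecache->events |= event;
}
```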
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
index 7a3b773be3f9..9658896f899a 100644
--- a/net/ipv4/netfilter/ip_conntrack_ftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_ftp.c
@@ -262,7 +262,8 @@ static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
 }
 
 /* We don't update if it's older than what we have. */
-static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir)
+static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir,
+			  struct sk_buff *skb)
 {
 	unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
 
@@ -276,10 +277,13 @@ static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir)
 			oldest = i;
 	}
 
-	if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER)
+	if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
 		info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
-	else if (oldest != NUM_SEQ_TO_REMEMBER)
+		ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
+	} else if (oldest != NUM_SEQ_TO_REMEMBER) {
 		info->seq_aft_nl[dir][oldest] = nl_seq;
+		ip_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
+	}
 }
 
 static int help(struct sk_buff **pskb,
@@ -439,7 +443,7 @@ out_update_nl:
 	/* Now if this ends in \n, update ftp info. Seq may have been
 	 * adjusted by NAT code. */
 	if (ends_in_nl)
-		update_nl_seq(seq, ct_ftp_info,dir);
+		update_nl_seq(seq, ct_ftp_info,dir, *pskb);
 out:
 	spin_unlock_bh(&ip_ftp_lock);
 	return ret;
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 602c74db3252..dca1f63d6f51 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -102,6 +102,7 @@ static int icmp_packet(struct ip_conntrack *ct,
 		ct->timeout.function((unsigned long)ct);
 	} else {
 		atomic_inc(&ct->proto.icmp.count);
+		ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
 		ip_ct_refresh_acct(ct, ctinfo, skb, ip_ct_icmp_timeout);
 	}
 
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index 31d75390bf12..3d5f878a07d1 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -404,6 +404,8 @@ static int sctp_packet(struct ip_conntrack *conntrack,
 	}
 
 	conntrack->proto.sctp.state = newconntrack;
+	if (oldsctpstate != newconntrack)
+		ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
 	write_unlock_bh(&sctp_lock);
 }
 
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 809dfed766d4..a569ad1ee4d9 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -973,6 +973,10 @@ static int tcp_packet(struct ip_conntrack *conntrack,
 		  ? ip_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state];
 	write_unlock_bh(&tcp_lock);
 
+	ip_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+	if (new_state != old_state)
+		ip_conntrack_event_cache(IPCT_PROTOINFO, skb);
+
 	if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
 		/* If only reply is a RST, we can consider ourselves not to
 		   have an established connection: this is a fairly common
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
index 8c1eaba098d4..6066eaf4d825 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
@@ -73,7 +73,8 @@ static int udp_packet(struct ip_conntrack *conntrack,
 		ip_ct_refresh_acct(conntrack, ctinfo, skb,
 				   ip_ct_udp_timeout_stream);
 		/* Also, more likely to be important, and not a probe */
-		set_bit(IPS_ASSURED_BIT, &conntrack->status);
+		if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
+			ip_conntrack_event_cache(IPCT_STATUS, skb);
 	} else
 		ip_ct_refresh_acct(conntrack, ctinfo, skb, ip_ct_udp_timeout);
 
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index dccd4abab7ae..f0880004115d 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -402,6 +402,7 @@ static unsigned int ip_confirm(unsigned int hooknum,
 			       const struct net_device *out,
 			       int (*okfn)(struct sk_buff *))
 {
+	ip_conntrack_event_cache_init(*pskb);
 	/* We've seen it coming out the other side: confirm it */
 	return ip_conntrack_confirm(pskb);
 }
@@ -419,6 +420,7 @@ static unsigned int ip_conntrack_help(unsigned int hooknum,
 	ct = ip_conntrack_get(*pskb, &ctinfo);
 	if (ct && ct->helper) {
 		unsigned int ret;
+		ip_conntrack_event_cache_init(*pskb);
 		ret = ct->helper->help(pskb, ct, ctinfo);
 		if (ret != NF_ACCEPT)
 			return ret;
@@ -889,6 +891,7 @@ static int init_or_cleanup(int init)
 	return ret;
 
  cleanup:
+	synchronize_net();
 #ifdef CONFIG_SYSCTL
 	unregister_sysctl_table(ip_ct_sysctl_header);
 cleanup_localinops:
@@ -971,6 +974,13 @@ void need_ip_conntrack(void)
 {
 }
 
+#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
+EXPORT_SYMBOL_GPL(ip_conntrack_chain);
+EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain);
+EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier);
+EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier);
+EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache);
+#endif
 EXPORT_SYMBOL(ip_conntrack_protocol_register);
 EXPORT_SYMBOL(ip_conntrack_protocol_unregister);
 EXPORT_SYMBOL(ip_ct_get_tuple);
```