 71 files changed, 768 insertions(+), 709 deletions(-)
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index d4c4c5120bc0..70d3b4f1e48d 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -172,8 +172,8 @@ struct nf_logger {
 
 /* Function to register/unregister log function. */
 int nf_log_register(int pf, struct nf_logger *logger);
-int nf_log_unregister_pf(int pf);
-void nf_log_unregister_logger(struct nf_logger *logger);
+void nf_log_unregister(struct nf_logger *logger);
+void nf_log_unregister_pf(int pf);
 
 /* Calls the registered backend logging function */
 void nf_log_packet(int pf,
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 33581c13d947..da9274e6bf12 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -301,6 +301,12 @@ extern unsigned int ip_conntrack_htable_size;
 extern int ip_conntrack_checksum;
 
 #define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
+#define CONNTRACK_STAT_INC_ATOMIC(count) \
+do { \
+        local_bh_disable(); \
+        __get_cpu_var(ip_conntrack_stat).count++; \
+        local_bh_enable(); \
+} while (0)
 
 #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
 #include <linux/notifier.h>
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 68ec27490c20..0e690e34c00b 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -257,6 +257,12 @@ extern int nf_conntrack_max;
 
 DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
 #define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)
+#define NF_CT_STAT_INC_ATOMIC(count) \
+do { \
+        local_bh_disable(); \
+        __get_cpu_var(nf_conntrack_stat).count++; \
+        local_bh_enable(); \
+} while (0)
 
 /* no helper, no nat */
 #define NF_CT_F_BASIC 0
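The *_STAT_INC_ATOMIC macros added above exist because the plain *_STAT_INC variants assume bottom halves are already disabled (they are used from softirq context). Once these conntrack paths are protected only by rcu_read_lock(), process-context callers have to disable BHs themselves around the non-atomic per-CPU increment, which is what the new macros do. A minimal usage sketch; the function name and the choice of the `found' counter are illustrative only, not part of the patch:

        /* sketch: bump a per-CPU conntrack statistic from process context */
        static void example_bump_stat(void)
        {
                local_bh_disable();     /* keep local softirqs from racing the non-atomic ++ */
                __get_cpu_var(ip_conntrack_stat).found++;
                local_bh_enable();
        }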
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 664ddcffe00d..eb575cbd4c95 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -89,7 +89,7 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
 
 /* Protocol registration. */
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
 extern struct nf_conntrack_l3proto *
 nf_ct_l3proto_find_get(u_int16_t l3proto);
@@ -106,7 +106,7 @@ __nf_ct_l3proto_find(u_int16_t l3proto)
 {
         if (unlikely(l3proto >= AF_MAX))
                 return &nf_conntrack_l3proto_generic;
-        return nf_ct_l3protos[l3proto];
+        return rcu_dereference(nf_ct_l3protos[l3proto]);
 }
 
 #endif /*_NF_CONNTRACK_L3PROTO_H*/
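The rcu_dereference() introduced in __nf_ct_l3proto_find() above is only safe inside an RCU read-side critical section; packet-processing callers get one implicitly because nf_hook_slow() now runs the hooks under rcu_read_lock(). A minimal reader sketch for other contexts, assuming only the declarations in this header (the local variable name is illustrative):

        struct nf_conntrack_l3proto *l3proto;

        rcu_read_lock();
        l3proto = __nf_ct_l3proto_find(PF_INET);        /* rcu_dereference() inside */
        /* use l3proto here; do not sleep or cache the pointer past the unlock */
        rcu_read_unlock();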
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index fc8af08ff542..8415182ec126 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -109,7 +109,7 @@ extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
 
 /* Protocol registration. */
 extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
 
 /* Generic netlink helpers */
 extern int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index f9a5ae9d5b6d..45712aec6a0e 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -208,7 +208,7 @@ static int __init ebt_log_init(void)
 
 static void __exit ebt_log_fini(void)
 {
-        nf_log_unregister_logger(&ebt_log_logger);
+        nf_log_unregister(&ebt_log_logger);
         ebt_unregister_watcher(&log);
 }
 
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 2e4cb24e191a..8e15cc47f6c0 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -323,7 +323,7 @@ static void __exit ebt_ulog_fini(void)
         ebt_ulog_buff_t *ub;
         int i;
 
-        nf_log_unregister_logger(&ebt_ulog_logger);
+        nf_log_unregister(&ebt_ulog_logger);
         ebt_unregister_watcher(&ulog);
         for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
                 ub = &ulog_buffers[i];
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 9b08e7ad71bc..601808c796ec 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -226,7 +226,7 @@ config IP_NF_QUEUE
 
 config IP_NF_IPTABLES
         tristate "IP tables support (required for filtering/masq/NAT)"
-        depends on NETFILTER_XTABLES
+        select NETFILTER_XTABLES
         help
           iptables is a general, extensible packet identification framework.
           The packet filtering and full NAT (masquerading, port forwarding,
@@ -606,7 +606,9 @@ config IP_NF_TARGET_TTL
 config IP_NF_TARGET_CLUSTERIP
         tristate "CLUSTERIP target support (EXPERIMENTAL)"
         depends on IP_NF_MANGLE && EXPERIMENTAL
-        depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
+        depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
+        select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK
+        select NF_CONNTRACK_MARK if NF_CONNTRACK_IPV4
         help
           The CLUSTERIP target allows you to build load-balancing clusters of
           network servers without having a dedicated load-balancing
@@ -629,7 +631,7 @@ config IP_NF_RAW
 # ARP tables
 config IP_NF_ARPTABLES
         tristate "ARP tables support"
-        depends on NETFILTER_XTABLES
+        select NETFILTER_XTABLES
         help
           arptables is a general, extensible packet identification framework.
           The ARP packet filtering and mangling (manipulation)subsystems
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 04e466d53c0b..07ba1dd136b5 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -303,6 +303,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
         struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
         struct ip_conntrack_protocol *proto;
         struct ip_conntrack_helper *helper;
+        typeof(ip_conntrack_destroyed) destroyed;
 
         DEBUGP("destroy_conntrack(%p)\n", ct);
         IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
@@ -318,12 +319,16 @@ destroy_conntrack(struct nf_conntrack *nfct)
         /* To make sure we don't get any weird locking issues here:
          * destroy_conntrack() MUST NOT be called with a write lock
          * to ip_conntrack_lock!!! -HW */
+        rcu_read_lock();
         proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
         if (proto && proto->destroy)
                 proto->destroy(ct);
 
-        if (ip_conntrack_destroyed)
-                ip_conntrack_destroyed(ct);
+        destroyed = rcu_dereference(ip_conntrack_destroyed);
+        if (destroyed)
+                destroyed(ct);
+
+        rcu_read_unlock();
 
         write_lock_bh(&ip_conntrack_lock);
         /* Expectations will have been removed in clean_from_lists,
@@ -536,7 +541,7 @@ static int early_drop(struct list_head *chain)
         if (del_timer(&ct->timeout)) {
                 death_by_timeout((unsigned long)ct);
                 dropped = 1;
-                CONNTRACK_STAT_INC(early_drop);
+                CONNTRACK_STAT_INC_ATOMIC(early_drop);
         }
         ip_conntrack_put(ct);
         return dropped;
@@ -595,13 +600,13 @@ ip_conntrack_proto_find_get(u_int8_t protocol)
 {
         struct ip_conntrack_protocol *p;
 
-        preempt_disable();
+        rcu_read_lock();
         p = __ip_conntrack_proto_find(protocol);
         if (p) {
                 if (!try_module_get(p->me))
                         p = &ip_conntrack_generic_protocol;
         }
-        preempt_enable();
+        rcu_read_unlock();
 
         return p;
 }
@@ -802,7 +807,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 
         /* Previously seen (loopback or untracked)? Ignore. */
         if ((*pskb)->nfct) {
-                CONNTRACK_STAT_INC(ignore);
+                CONNTRACK_STAT_INC_ATOMIC(ignore);
                 return NF_ACCEPT;
         }
 
@@ -830,6 +835,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
         }
 #endif
 
+        /* rcu_read_lock()ed by nf_hook_slow */
         proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol);
 
         /* It may be an special packet, error, unclean...
@@ -837,20 +843,20 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
          * core what to do with the packet. */
         if (proto->error != NULL
             && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
-                CONNTRACK_STAT_INC(error);
-                CONNTRACK_STAT_INC(invalid);
+                CONNTRACK_STAT_INC_ATOMIC(error);
+                CONNTRACK_STAT_INC_ATOMIC(invalid);
                 return -ret;
         }
 
         if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) {
                 /* Not valid part of a connection */
-                CONNTRACK_STAT_INC(invalid);
+                CONNTRACK_STAT_INC_ATOMIC(invalid);
                 return NF_ACCEPT;
         }
 
         if (IS_ERR(ct)) {
                 /* Too stressed to deal. */
-                CONNTRACK_STAT_INC(drop);
+                CONNTRACK_STAT_INC_ATOMIC(drop);
                 return NF_DROP;
         }
 
@@ -862,7 +868,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
          * the netfilter core what to do*/
                 nf_conntrack_put((*pskb)->nfct);
                 (*pskb)->nfct = NULL;
-                CONNTRACK_STAT_INC(invalid);
+                CONNTRACK_STAT_INC_ATOMIC(invalid);
                 return -ret;
         }
 
@@ -875,8 +881,15 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 int invert_tuplepr(struct ip_conntrack_tuple *inverse,
                    const struct ip_conntrack_tuple *orig)
 {
-        return ip_ct_invert_tuple(inverse, orig,
-                                  __ip_conntrack_proto_find(orig->dst.protonum));
+        struct ip_conntrack_protocol *proto;
+        int ret;
+
+        rcu_read_lock();
+        proto = __ip_conntrack_proto_find(orig->dst.protonum);
+        ret = ip_ct_invert_tuple(inverse, orig, proto);
+        rcu_read_unlock();
+
+        return ret;
 }
 
 /* Would two expected things clash? */
@@ -1354,7 +1367,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
    supposed to kill the mall. */
 void ip_conntrack_cleanup(void)
 {
-        ip_ct_attach = NULL;
+        rcu_assign_pointer(ip_ct_attach, NULL);
 
         /* This makes sure all current packets have passed through
            netfilter framework. Roll on, two-stage module
@@ -1507,15 +1520,15 @@ int __init ip_conntrack_init(void)
         /* Don't NEED lock here, but good form anyway. */
         write_lock_bh(&ip_conntrack_lock);
         for (i = 0; i < MAX_IP_CT_PROTO; i++)
-                ip_ct_protos[i] = &ip_conntrack_generic_protocol;
+                rcu_assign_pointer(ip_ct_protos[i], &ip_conntrack_generic_protocol);
         /* Sew in builtin protocols. */
-        ip_ct_protos[IPPROTO_TCP] = &ip_conntrack_protocol_tcp;
-        ip_ct_protos[IPPROTO_UDP] = &ip_conntrack_protocol_udp;
-        ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
+        rcu_assign_pointer(ip_ct_protos[IPPROTO_TCP], &ip_conntrack_protocol_tcp);
+        rcu_assign_pointer(ip_ct_protos[IPPROTO_UDP], &ip_conntrack_protocol_udp);
+        rcu_assign_pointer(ip_ct_protos[IPPROTO_ICMP], &ip_conntrack_protocol_icmp);
         write_unlock_bh(&ip_conntrack_lock);
 
         /* For use by ipt_REJECT */
-        ip_ct_attach = ip_conntrack_attach;
+        rcu_assign_pointer(ip_ct_attach, ip_conntrack_attach);
 
         /* Set up fake conntrack:
             - to never be deleted, not in any hashes */
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 300ccbbbdac9..c7c1ec61b0f5 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -796,7 +796,7 @@ int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
                 ret = -EBUSY;
                 goto out;
         }
-        ip_ct_protos[proto->proto] = proto;
+        rcu_assign_pointer(ip_ct_protos[proto->proto], proto);
  out:
         write_unlock_bh(&ip_conntrack_lock);
         return ret;
@@ -805,11 +805,10 @@ int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
 void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
 {
         write_lock_bh(&ip_conntrack_lock);
-        ip_ct_protos[proto->proto] = &ip_conntrack_generic_protocol;
+        rcu_assign_pointer(ip_ct_protos[proto->proto],
+                           &ip_conntrack_generic_protocol);
         write_unlock_bh(&ip_conntrack_lock);
-
-        /* Somebody could be still looking at the proto in bh. */
-        synchronize_net();
+        synchronize_rcu();
 
         /* Remove all contrack entries for this protocol */
         ip_ct_iterate_cleanup(kill_proto, &proto->proto);
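The unregister path above shows the matching writer side of the conversion: the slot is switched back to the generic protocol with rcu_assign_pointer() under the write lock, and synchronize_rcu() then waits until every CPU that might still hold the old pointer has left its read-side critical section before the protocol's conntrack entries are torn down, replacing the earlier synchronize_net() call. A sketch of the required ordering, using the symbols from the hunk above:

        write_lock_bh(&ip_conntrack_lock);
        rcu_assign_pointer(ip_ct_protos[proto->proto], &ip_conntrack_generic_protocol);
        write_unlock_bh(&ip_conntrack_lock);
        synchronize_rcu();      /* all pre-existing RCU readers have finished with 'proto' */
        ip_ct_iterate_cleanup(kill_proto, &proto->proto);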
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 275a4d3faf0a..40737fdbe9a7 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -50,7 +50,7 @@ static struct ip_nat_protocol *ip_nat_protos[MAX_IP_NAT_PROTO];
 static inline struct ip_nat_protocol *
 __ip_nat_proto_find(u_int8_t protonum)
 {
-        return ip_nat_protos[protonum];
+        return rcu_dereference(ip_nat_protos[protonum]);
 }
 
 struct ip_nat_protocol *
@@ -58,13 +58,11 @@ ip_nat_proto_find_get(u_int8_t protonum)
 {
         struct ip_nat_protocol *p;
 
-        /* we need to disable preemption to make sure 'p' doesn't get
-         * removed until we've grabbed the reference */
-        preempt_disable();
+        rcu_read_lock();
         p = __ip_nat_proto_find(protonum);
         if (!try_module_get(p->me))
                 p = &ip_nat_unknown_protocol;
-        preempt_enable();
+        rcu_read_unlock();
 
         return p;
 }
@@ -120,8 +118,8 @@ static int
 in_range(const struct ip_conntrack_tuple *tuple,
          const struct ip_nat_range *range)
 {
-        struct ip_nat_protocol *proto =
-                __ip_nat_proto_find(tuple->dst.protonum);
+        struct ip_nat_protocol *proto;
+        int ret = 0;
 
         /* If we are supposed to map IPs, then we must be in the
            range specified, otherwise let this drag us onto a new src IP. */
@@ -131,12 +129,15 @@ in_range(const struct ip_conntrack_tuple *tuple,
                 return 0;
         }
 
+        rcu_read_lock();
+        proto = __ip_nat_proto_find(tuple->dst.protonum);
         if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
             || proto->in_range(tuple, IP_NAT_MANIP_SRC,
                                &range->min, &range->max))
-                return 1;
+                ret = 1;
+        rcu_read_unlock();
 
-        return 0;
+        return ret;
 }
 
 static inline int
@@ -260,27 +261,25 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
         /* 3) The per-protocol part of the manip is made to map into
            the range to make a unique tuple. */
 
-        proto = ip_nat_proto_find_get(orig_tuple->dst.protonum);
+        rcu_read_lock();
+        proto = __ip_nat_proto_find(orig_tuple->dst.protonum);
 
         /* Change protocol info to have some randomization */
         if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
                 proto->unique_tuple(tuple, range, maniptype, conntrack);
-                ip_nat_proto_put(proto);
-                return;
+                goto out;
         }
 
         /* Only bother mapping if it's not already in range and unique */
         if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
             || proto->in_range(tuple, maniptype, &range->min, &range->max))
-            && !ip_nat_used_tuple(tuple, conntrack)) {
-                ip_nat_proto_put(proto);
-                return;
-        }
+            && !ip_nat_used_tuple(tuple, conntrack))
+                goto out;
 
         /* Last change: get protocol to try to obtain unique tuple. */
         proto->unique_tuple(tuple, range, maniptype, conntrack);
-
-        ip_nat_proto_put(proto);
+ out:
+        rcu_read_unlock();
 }
 
 unsigned int
@@ -360,12 +359,11 @@ manip_pkt(u_int16_t proto,
         iph = (void *)(*pskb)->data + iphdroff;
 
         /* Manipulate protcol part. */
-        p = ip_nat_proto_find_get(proto);
-        if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
-                ip_nat_proto_put(p);
+
+        /* rcu_read_lock()ed by nf_hook_slow */
+        p = __ip_nat_proto_find(proto);
+        if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
                 return 0;
-        }
-        ip_nat_proto_put(p);
 
         iph = (void *)(*pskb)->data + iphdroff;
 
@@ -422,6 +420,7 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
                 struct icmphdr icmp;
                 struct iphdr ip;
         } *inside;
+        struct ip_conntrack_protocol *proto;
         struct ip_conntrack_tuple inner, target;
         int hdrlen = (*pskb)->nh.iph->ihl * 4;
         enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -457,10 +456,11 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct,
         DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n",
                *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
 
+        /* rcu_read_lock()ed by nf_hook_slow */
+        proto = __ip_conntrack_proto_find(inside->ip.protocol);
         if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 +
                              sizeof(struct icmphdr) + inside->ip.ihl*4,
-                             &inner,
-                             __ip_conntrack_proto_find(inside->ip.protocol)))
+                             &inner, proto))
                 return 0;
 
         /* Change inner back to look like incoming packet. We do the
@@ -515,7 +515,7 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto)
                 ret = -EBUSY;
                 goto out;
         }
-        ip_nat_protos[proto->protonum] = proto;
+        rcu_assign_pointer(ip_nat_protos[proto->protonum], proto);
  out:
         write_unlock_bh(&ip_nat_lock);
         return ret;
@@ -526,11 +526,10 @@ EXPORT_SYMBOL(ip_nat_protocol_register);
 void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
 {
         write_lock_bh(&ip_nat_lock);
-        ip_nat_protos[proto->protonum] = &ip_nat_unknown_protocol;
+        rcu_assign_pointer(ip_nat_protos[proto->protonum],
+                           &ip_nat_unknown_protocol);
         write_unlock_bh(&ip_nat_lock);
-
-        /* Someone could be still looking at the proto in a bh. */
-        synchronize_net();
+        synchronize_rcu();
 }
 EXPORT_SYMBOL(ip_nat_protocol_unregister);
 
@@ -594,10 +593,10 @@ static int __init ip_nat_init(void)
         /* Sew in builtin protocols. */
         write_lock_bh(&ip_nat_lock);
         for (i = 0; i < MAX_IP_NAT_PROTO; i++)
-                ip_nat_protos[i] = &ip_nat_unknown_protocol;
-        ip_nat_protos[IPPROTO_TCP] = &ip_nat_protocol_tcp;
-        ip_nat_protos[IPPROTO_UDP] = &ip_nat_protocol_udp;
-        ip_nat_protos[IPPROTO_ICMP] = &ip_nat_protocol_icmp;
+                rcu_assign_pointer(ip_nat_protos[i], &ip_nat_unknown_protocol);
+        rcu_assign_pointer(ip_nat_protos[IPPROTO_TCP], &ip_nat_protocol_tcp);
+        rcu_assign_pointer(ip_nat_protos[IPPROTO_UDP], &ip_nat_protocol_udp);
+        rcu_assign_pointer(ip_nat_protos[IPPROTO_ICMP], &ip_nat_protocol_icmp);
         write_unlock_bh(&ip_nat_lock);
 
         for (i = 0; i < ip_nat_htable_size; i++) {
@@ -605,8 +604,8 @@ static int __init ip_nat_init(void)
         }
 
         /* FIXME: Man, this is a hack. <SIGH> */
-        IP_NF_ASSERT(ip_conntrack_destroyed == NULL);
-        ip_conntrack_destroyed = &ip_nat_cleanup_conntrack;
+        IP_NF_ASSERT(rcu_dereference(ip_conntrack_destroyed) == NULL);
+        rcu_assign_pointer(ip_conntrack_destroyed, ip_nat_cleanup_conntrack);
 
         /* Initialize fake conntrack so that NAT will skip it */
         ip_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
@@ -624,7 +623,8 @@ static int clean_nat(struct ip_conntrack *i, void *data)
 static void __exit ip_nat_cleanup(void)
 {
         ip_ct_iterate_cleanup(&clean_nat, NULL);
-        ip_conntrack_destroyed = NULL;
+        rcu_assign_pointer(ip_conntrack_destroyed, NULL);
+        synchronize_rcu();
         vfree(bysource);
 }
 
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index f4a62f2522ff..d9c37fd94228 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -489,7 +489,7 @@ static int __init ipt_log_init(void)
 
 static void __exit ipt_log_fini(void)
 {
-        nf_log_unregister_logger(&ipt_log_logger);
+        nf_log_unregister(&ipt_log_logger);
         xt_unregister_target(&ipt_log_reg);
 }
 
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 3a1eacc634b3..a26404dbe212 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -419,7 +419,7 @@ static void __exit ipt_ulog_fini(void)
         DEBUGP("ipt_ULOG: cleanup_module\n");
 
         if (nflog)
-                nf_log_unregister_logger(&ipt_ulog_logger);
+                nf_log_unregister(&ipt_ulog_logger);
         xt_unregister_target(&ipt_ulog_reg);
         sock_release(nflognl->sk_socket);
 
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 677b6c80c618..e5aa4d849b00 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -170,7 +170,9 @@ icmp_error_message(struct sk_buff *skb,
                 return -NF_ACCEPT;
         }
 
+        /* rcu_read_lock()ed by nf_hook_slow */
         innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
+
         dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp);
         /* Are they talking about one of our connections? */
         if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index cf1010827be1..2c01378d3592 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -53,7 +53,7 @@ static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];
 static inline struct nf_nat_protocol *
 __nf_nat_proto_find(u_int8_t protonum)
 {
-        return nf_nat_protos[protonum];
+        return rcu_dereference(nf_nat_protos[protonum]);
 }
 
 struct nf_nat_protocol *
@@ -61,13 +61,11 @@ nf_nat_proto_find_get(u_int8_t protonum)
 {
         struct nf_nat_protocol *p;
 
-        /* we need to disable preemption to make sure 'p' doesn't get
-         * removed until we've grabbed the reference */
-        preempt_disable();
+        rcu_read_lock();
         p = __nf_nat_proto_find(protonum);
         if (!try_module_get(p->me))
                 p = &nf_nat_unknown_protocol;
-        preempt_enable();
+        rcu_read_unlock();
 
         return p;
 }
@@ -126,8 +124,8 @@ in_range(const struct nf_conntrack_tuple *tuple,
          const struct nf_nat_range *range)
 {
         struct nf_nat_protocol *proto;
+        int ret = 0;
 
-        proto = __nf_nat_proto_find(tuple->dst.protonum);
         /* If we are supposed to map IPs, then we must be in the
            range specified, otherwise let this drag us onto a new src IP. */
         if (range->flags & IP_NAT_RANGE_MAP_IPS) {
@@ -136,12 +134,15 @@ in_range(const struct nf_conntrack_tuple *tuple,
                 return 0;
         }
 
+        rcu_read_lock();
+        proto = __nf_nat_proto_find(tuple->dst.protonum);
         if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
             proto->in_range(tuple, IP_NAT_MANIP_SRC,
                             &range->min, &range->max))
-                return 1;
+                ret = 1;
+        rcu_read_unlock();
 
-        return 0;
+        return ret;
 }
 
 static inline int
@@ -268,27 +269,25 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
         /* 3) The per-protocol part of the manip is made to map into
            the range to make a unique tuple. */
 
-        proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);
+        rcu_read_lock();
+        proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
 
         /* Change protocol info to have some randomization */
         if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
                 proto->unique_tuple(tuple, range, maniptype, ct);
-                nf_nat_proto_put(proto);
-                return;
+                goto out;
         }
 
         /* Only bother mapping if it's not already in range and unique */
         if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
              proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
-            !nf_nat_used_tuple(tuple, ct)) {
-                nf_nat_proto_put(proto);
-                return;
-        }
+            !nf_nat_used_tuple(tuple, ct))
+                goto out;
 
         /* Last change: get protocol to try to obtain unique tuple. */
         proto->unique_tuple(tuple, range, maniptype, ct);
-
-        nf_nat_proto_put(proto);
+ out:
+        rcu_read_unlock();
 }
 
 unsigned int
@@ -369,12 +368,11 @@ manip_pkt(u_int16_t proto,
         iph = (void *)(*pskb)->data + iphdroff;
 
         /* Manipulate protcol part. */
-        p = nf_nat_proto_find_get(proto);
-        if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
-                nf_nat_proto_put(p);
+
+        /* rcu_read_lock()ed by nf_hook_slow */
+        p = __nf_nat_proto_find(proto);
+        if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
                 return 0;
-        }
-        nf_nat_proto_put(p);
 
         iph = (void *)(*pskb)->data + iphdroff;
 
@@ -431,6 +429,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
                 struct icmphdr icmp;
                 struct iphdr ip;
         } *inside;
+        struct nf_conntrack_l4proto *l4proto;
         struct nf_conntrack_tuple inner, target;
         int hdrlen = (*pskb)->nh.iph->ihl * 4;
         enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -466,16 +465,16 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
         DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n",
                *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
 
+        /* rcu_read_lock()ed by nf_hook_slow */
+        l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
+
         if (!nf_ct_get_tuple(*pskb,
                              (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
                              (*pskb)->nh.iph->ihl*4 +
                              sizeof(struct icmphdr) + inside->ip.ihl*4,
                              (u_int16_t)AF_INET,
                              inside->ip.protocol,
-                             &inner,
-                             l3proto,
-                             __nf_ct_l4proto_find((u_int16_t)PF_INET,
-                                                  inside->ip.protocol)))
+                             &inner, l3proto, l4proto))
                 return 0;
 
         /* Change inner back to look like incoming packet. We do the
@@ -529,7 +528,7 @@ int nf_nat_protocol_register(struct nf_nat_protocol *proto)
                 ret = -EBUSY;
                 goto out;
         }
-        nf_nat_protos[proto->protonum] = proto;
+        rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
  out:
         write_unlock_bh(&nf_nat_lock);
         return ret;
@@ -540,11 +539,10 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
 void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
 {
         write_lock_bh(&nf_nat_lock);
-        nf_nat_protos[proto->protonum] = &nf_nat_unknown_protocol;
+        rcu_assign_pointer(nf_nat_protos[proto->protonum],
+                           &nf_nat_unknown_protocol);
         write_unlock_bh(&nf_nat_lock);
-
-        /* Someone could be still looking at the proto in a bh. */
-        synchronize_net();
+        synchronize_rcu();
 }
 EXPORT_SYMBOL(nf_nat_protocol_unregister);
 
@@ -608,10 +606,10 @@ static int __init nf_nat_init(void)
         /* Sew in builtin protocols. */
         write_lock_bh(&nf_nat_lock);
         for (i = 0; i < MAX_IP_NAT_PROTO; i++)
-                nf_nat_protos[i] = &nf_nat_unknown_protocol;
-        nf_nat_protos[IPPROTO_TCP] = &nf_nat_protocol_tcp;
-        nf_nat_protos[IPPROTO_UDP] = &nf_nat_protocol_udp;
-        nf_nat_protos[IPPROTO_ICMP] = &nf_nat_protocol_icmp;
+                rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
+        rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
+        rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
+        rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
         write_unlock_bh(&nf_nat_lock);
 
         for (i = 0; i < nf_nat_htable_size; i++) {
@@ -619,8 +617,8 @@ static int __init nf_nat_init(void)
         }
 
         /* FIXME: Man, this is a hack. <SIGH> */
-        NF_CT_ASSERT(nf_conntrack_destroyed == NULL);
-        nf_conntrack_destroyed = &nf_nat_cleanup_conntrack;
+        NF_CT_ASSERT(rcu_dereference(nf_conntrack_destroyed) == NULL);
+        rcu_assign_pointer(nf_conntrack_destroyed, nf_nat_cleanup_conntrack);
 
         /* Initialize fake conntrack so that NAT will skip it */
         nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
@@ -644,7 +642,8 @@ static int clean_nat(struct nf_conn *i, void *data)
 static void __exit nf_nat_cleanup(void)
 {
         nf_ct_iterate_cleanup(&clean_nat, NULL);
-        nf_conntrack_destroyed = NULL;
+        rcu_assign_pointer(nf_conntrack_destroyed, NULL);
+        synchronize_rcu();
         vfree(bysource);
         nf_ct_l3proto_put(l3proto);
 }
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 5ce6cd85680b..9a582fb4ef9f 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -26,16 +26,16 @@
  */
 #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
 
-static int fast_convergence = 1;
-static int max_increment = 16;
-static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
-static int initial_ssthresh = 100;
-static int bic_scale = 41;
-static int tcp_friendliness = 1;
+static int fast_convergence __read_mostly = 1;
+static int max_increment __read_mostly = 16;
+static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
+static int initial_ssthresh __read_mostly = 100;
+static int bic_scale __read_mostly = 41;
+static int tcp_friendliness __read_mostly = 1;
 
-static u32 cube_rtt_scale;
-static u32 beta_scale;
-static u64 cube_factor;
+static u32 cube_rtt_scale __read_mostly;
+static u32 beta_scale __read_mostly;
+static u64 cube_factor __read_mostly;
 
 /* Note parameters that are used for precomputing scale factors are read-only */
 module_param(fast_convergence, int, 0644);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 63318b6e9d51..1020eb48d8d1 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -10,22 +10,23 @@
 #include <linux/module.h>
 #include <net/tcp.h>
 
 #define ALPHA_BASE (1<<7)       /* 1.0 with shift << 7 */
 #define BETA_MIN (1<<6)         /* 0.5 with shift << 7 */
 #define BETA_MAX 102            /* 0.8 with shift << 7 */
 
-static int use_rtt_scaling = 1;
+static int use_rtt_scaling __read_mostly = 1;
 module_param(use_rtt_scaling, int, 0644);
 MODULE_PARM_DESC(use_rtt_scaling, "turn on/off RTT scaling");
 
-static int use_bandwidth_switch = 1;
+static int use_bandwidth_switch __read_mostly = 1;
 module_param(use_bandwidth_switch, int, 0644);
 MODULE_PARM_DESC(use_bandwidth_switch, "turn on/off bandwidth switcher");
 
 struct htcp {
         u32 alpha;              /* Fixed point arith, << 7 */
         u8 beta;                /* Fixed point arith, << 7 */
-        u8 modeswitch;          /* Delay modeswitch until we had at least one congestion event */
+        u8 modeswitch;          /* Delay modeswitch
+                                   until we had at least one congestion event */
         u16 pkts_acked;
         u32 packetcount;
         u32 minRTT;
@@ -44,14 +45,14 @@ struct htcp {
         u32 lasttime;
 };
 
-static inline u32 htcp_cong_time(struct htcp *ca)
+static inline u32 htcp_cong_time(const struct htcp *ca)
 {
         return jiffies - ca->last_cong;
 }
 
-static inline u32 htcp_ccount(struct htcp *ca)
+static inline u32 htcp_ccount(const struct htcp *ca)
 {
-        return htcp_cong_time(ca)/ca->minRTT;
+        return htcp_cong_time(ca) / ca->minRTT;
 }
 
 static inline void htcp_reset(struct htcp *ca)
@@ -67,10 +68,12 @@ static u32 htcp_cwnd_undo(struct sock *sk)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         struct htcp *ca = inet_csk_ca(sk);
+
         ca->last_cong = ca->undo_last_cong;
         ca->maxRTT = ca->undo_maxRTT;
         ca->old_maxB = ca->undo_old_maxB;
-        return max(tp->snd_cwnd, (tp->snd_ssthresh<<7)/ca->beta);
+
+        return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
 }
 
 static inline void measure_rtt(struct sock *sk)
@@ -78,17 +81,19 @@ static inline void measure_rtt(struct sock *sk)
         const struct inet_connection_sock *icsk = inet_csk(sk);
         const struct tcp_sock *tp = tcp_sk(sk);
         struct htcp *ca = inet_csk_ca(sk);
-        u32 srtt = tp->srtt>>3;
+        u32 srtt = tp->srtt >> 3;
 
         /* keep track of minimum RTT seen so far, minRTT is zero at first */
         if (ca->minRTT > srtt || !ca->minRTT)
                 ca->minRTT = srtt;
 
         /* max RTT */
-        if (icsk->icsk_ca_state == TCP_CA_Open && tp->snd_ssthresh < 0xFFFF && htcp_ccount(ca) > 3) {
+        if (icsk->icsk_ca_state == TCP_CA_Open
+            && tp->snd_ssthresh < 0xFFFF && htcp_ccount(ca) > 3) {
                 if (ca->maxRTT < ca->minRTT)
                         ca->maxRTT = ca->minRTT;
-                if (ca->maxRTT < srtt && srtt <= ca->maxRTT+msecs_to_jiffies(20))
+                if (ca->maxRTT < srtt
+                    && srtt <= ca->maxRTT + msecs_to_jiffies(20))
                         ca->maxRTT = srtt;
         }
 }
@@ -116,15 +121,16 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
 
         ca->packetcount += pkts_acked;
 
-        if (ca->packetcount >= tp->snd_cwnd - (ca->alpha>>7? : 1)
+        if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1)
             && now - ca->lasttime >= ca->minRTT
             && ca->minRTT > 0) {
-                __u32 cur_Bi = ca->packetcount*HZ/(now - ca->lasttime);
+                __u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
+
                 if (htcp_ccount(ca) <= 3) {
                         /* just after backoff */
                         ca->minB = ca->maxB = ca->Bi = cur_Bi;
                 } else {
-                        ca->Bi = (3*ca->Bi + cur_Bi)/4;
+                        ca->Bi = (3 * ca->Bi + cur_Bi) / 4;
                         if (ca->Bi > ca->maxB)
                                 ca->maxB = ca->Bi;
                         if (ca->minB > ca->maxB)
@@ -142,7 +148,7 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
                 u32 old_maxB = ca->old_maxB;
                 ca->old_maxB = ca->maxB;
 
-                if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) {
+                if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
                         ca->beta = BETA_MIN;
                         ca->modeswitch = 0;
                         return;
@@ -150,7 +156,7 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
         }
 
         if (ca->modeswitch && minRTT > msecs_to_jiffies(10) && maxRTT) {
-                ca->beta = (minRTT<<7)/maxRTT;
+                ca->beta = (minRTT << 7) / maxRTT;
                 if (ca->beta < BETA_MIN)
                         ca->beta = BETA_MIN;
                 else if (ca->beta > BETA_MAX)
@@ -169,23 +175,26 @@ static inline void htcp_alpha_update(struct htcp *ca)
 
         if (diff > HZ) {
                 diff -= HZ;
-                factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/HZ) )/HZ;
+                factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / HZ)) / HZ;
         }
 
         if (use_rtt_scaling && minRTT) {
-                u32 scale = (HZ<<3)/(10*minRTT);
-                scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */
-                factor = (factor<<3)/scale;
+                u32 scale = (HZ << 3) / (10 * minRTT);
+
+                /* clamping ratio to interval [0.5,10]<<3 */
+                scale = min(max(scale, 1U << 2), 10U << 3);
+                factor = (factor << 3) / scale;
                 if (!factor)
                         factor = 1;
         }
 
-        ca->alpha = 2*factor*((1<<7)-ca->beta);
+        ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
         if (!ca->alpha)
                 ca->alpha = ALPHA_BASE;
 }
 
-/* After we have the rtt data to calculate beta, we'd still prefer to wait one
+/*
+ * After we have the rtt data to calculate beta, we'd still prefer to wait one
  * rtt before we adjust our beta to ensure we are working from a consistent
  * data.
  *
@@ -202,15 +211,16 @@ static void htcp_param_update(struct sock *sk)
         htcp_beta_update(ca, minRTT, maxRTT);
         htcp_alpha_update(ca);
 
-        /* add slowly fading memory for maxRTT to accommodate routing changes etc */
+        /* add slowly fading memory for maxRTT to accommodate routing changes */
         if (minRTT > 0 && maxRTT > minRTT)
-                ca->maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
+                ca->maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
 }
 
 static u32 htcp_recalc_ssthresh(struct sock *sk)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         const struct htcp *ca = inet_csk_ca(sk);
+
         htcp_param_update(sk);
         return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
@@ -227,7 +237,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
         if (tp->snd_cwnd <= tp->snd_ssthresh)
                 tcp_slow_start(tp);
         else {
-
                 measure_rtt(sk);
 
                 /* In dangerous area, increase slowly.
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index cd549aea84f0..da07e9a88ee9 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -42,7 +42,8 @@ config IP6_NF_QUEUE
 
 config IP6_NF_IPTABLES
         tristate "IP6 tables support (required for filtering)"
-        depends on INET && IPV6 && EXPERIMENTAL && NETFILTER_XTABLES
+        depends on INET && IPV6 && EXPERIMENTAL
+        select NETFILTER_XTABLES
         help
           ip6tables is a general, extensible packet identification framework.
           Currently only the packet filtering and packet mangling subsystem
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index dc9ec9305778..afaa039d0b7b 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -501,7 +501,7 @@ static int __init ip6t_log_init(void)
 
 static void __exit ip6t_log_fini(void)
 {
-        nf_log_unregister_logger(&ip6t_logger);
+        nf_log_unregister(&ip6t_logger);
         xt_unregister_target(&ip6t_log_reg);
 }
 
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c index 2c7efc6a506d..c2a909893a64 100644 --- a/net/ipv6/netfilter/ip6t_mh.c +++ b/net/ipv6/netfilter/ip6t_mh.c | |||
@@ -66,6 +66,13 @@ match(const struct sk_buff *skb, | |||
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
69 | if (mh->ip6mh_proto != IPPROTO_NONE) { | ||
70 | duprintf("Dropping invalid MH Payload Proto: %u\n", | ||
71 | mh->ip6mh_proto); | ||
72 | *hotdrop = 1; | ||
73 | return 0; | ||
74 | } | ||
75 | |||
69 | return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type, | 76 | return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type, |
70 | !!(mhinfo->invflags & IP6T_MH_INV_TYPE)); | 77 | !!(mhinfo->invflags & IP6T_MH_INV_TYPE)); |
71 | } | 78 | } |
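The new check in ip6t_mh rejects Mobility Headers whose payload-protocol field is not IPPROTO_NONE and signals the drop through *hotdrop, so the packet is discarded rather than simply failing this one rule. A reduced sketch of that pattern follows; the struct and field names are trimmed-down placeholders, not the exact xtables interface:

#include <stdbool.h>
#include <stdint.h>

#define IPPROTO_NONE 59		/* "no next header" */

struct mh_hdr {
	uint8_t ip6mh_proto;	/* payload protocol, expected to be IPPROTO_NONE */
	uint8_t ip6mh_type;	/* mobility header type */
};

/* Illustrative match helper: a malformed header sets *hotdrop so the
 * caller drops the packet instead of treating it as a plain non-match. */
static bool mh_match(const struct mh_hdr *mh, uint8_t wanted_type, bool *hotdrop)
{
	if (mh->ip6mh_proto != IPPROTO_NONE) {
		*hotdrop = true;	/* invalid MH: drop, don't just skip the rule */
		return false;
	}
	return mh->ip6mh_type == wanted_type;
}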
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index a2353edf4ebc..4b7be4bb4d03 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -154,8 +154,8 @@ ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, | |||
154 | */ | 154 | */ |
155 | if ((protoff < 0) || (protoff > (*pskb)->len)) { | 155 | if ((protoff < 0) || (protoff > (*pskb)->len)) { |
156 | DEBUGP("ip6_conntrack_core: can't find proto in pkt\n"); | 156 | DEBUGP("ip6_conntrack_core: can't find proto in pkt\n"); |
157 | NF_CT_STAT_INC(error); | 157 | NF_CT_STAT_INC_ATOMIC(error); |
158 | NF_CT_STAT_INC(invalid); | 158 | NF_CT_STAT_INC_ATOMIC(invalid); |
159 | return -NF_ACCEPT; | 159 | return -NF_ACCEPT; |
160 | } | 160 | } |
161 | 161 | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index b08622c992b2..19bdb7cb8ff3 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -182,6 +182,7 @@ icmpv6_error_message(struct sk_buff *skb, | |||
182 | return -NF_ACCEPT; | 182 | return -NF_ACCEPT; |
183 | } | 183 | } |
184 | 184 | ||
185 | /* rcu_read_lock()ed by nf_hook_slow */ | ||
185 | inproto = __nf_ct_l4proto_find(PF_INET6, inprotonum); | 186 | inproto = __nf_ct_l4proto_find(PF_INET6, inprotonum); |
186 | 187 | ||
187 | /* Are they talking about one of our connections? */ | 188 | /* Are they talking about one of our connections? */ |
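The one-line addition here documents a locking convention rather than changing behaviour: __nf_ct_l4proto_find() is called with the RCU read lock already held, because nf_hook_slow() takes it around the whole hook traversal. A generic sketch of that convention, with placeholder names, where the double-underscore helper assumes the caller holds rcu_read_lock() and the wrapper both looks up and uses the pointer inside one read-side section:

#include <linux/rcupdate.h>

struct proto_ops {
	int (*handle)(void *pkt);
};

static struct proto_ops *proto_table[256];	/* entries published with rcu_assign_pointer() */

/* Double-underscore helper: caller must already hold rcu_read_lock() */
static struct proto_ops *__proto_lookup(unsigned int num)
{
	return rcu_dereference(proto_table[num]);
}

/* Standalone caller: take the read lock around both the lookup and the use */
static int proto_handle(unsigned int num, void *pkt)
{
	struct proto_ops *ops;
	int ret = 0;

	rcu_read_lock();
	ops = __proto_lookup(num);
	if (ops)
		ret = ops->handle(pkt);
	rcu_read_unlock();
	return ret;
}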
diff --git a/net/key/af_key.c b/net/key/af_key.c index f3a026ff9b2c..1c58204d767e 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -2297,16 +2297,17 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg | |||
2297 | &sel, tmp.security, 1); | 2297 | &sel, tmp.security, 1); |
2298 | security_xfrm_policy_free(&tmp); | 2298 | security_xfrm_policy_free(&tmp); |
2299 | 2299 | ||
2300 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, | ||
2301 | AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL); | ||
2302 | |||
2303 | if (xp == NULL) | 2300 | if (xp == NULL) |
2304 | return -ENOENT; | 2301 | return -ENOENT; |
2305 | 2302 | ||
2306 | err = 0; | 2303 | err = security_xfrm_policy_delete(xp); |
2307 | 2304 | ||
2308 | if ((err = security_xfrm_policy_delete(xp))) | 2305 | xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, |
2306 | AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); | ||
2307 | |||
2308 | if (err) | ||
2309 | goto out; | 2309 | goto out; |
2310 | |||
2310 | c.seq = hdr->sadb_msg_seq; | 2311 | c.seq = hdr->sadb_msg_seq; |
2311 | c.pid = hdr->sadb_msg_pid; | 2312 | c.pid = hdr->sadb_msg_pid; |
2312 | c.event = XFRM_MSG_DELPOLICY; | 2313 | c.event = XFRM_MSG_DELPOLICY; |
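The pfkey_spddelete() change moves the audit call after the delete attempt, so the record carries the real outcome of security_xfrm_policy_delete(): 1 when the deletion went ahead, 0 when it was refused. A small userspace sketch of that "act first, then audit the result" ordering; the two helpers are stand-ins, not the kernel's API:

#include <stdio.h>

/* Stand-ins for the LSM hook and the audit sink. */
static int security_policy_delete(int policy_id)
{
	return (policy_id % 2) ? -1 : 0;	/* pretend odd ids are denied */
}

static void audit_policy_delete(int policy_id, int result)
{
	printf("audit: DELSPD policy=%d result=%d\n", policy_id, result);
}

static int delete_policy(int policy_id)
{
	int err = security_policy_delete(policy_id);	/* attempt the operation first */

	audit_policy_delete(policy_id, err ? 0 : 1);	/* then log what actually happened */
	return err;
}

int main(void)
{
	delete_policy(4);	/* logs result=1 */
	delete_policy(7);	/* logs result=0 */
	return 0;
}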
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 748f7f00909a..253fce3ad2d3 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -302,7 +302,9 @@ config NETFILTER_XT_TARGET_CONNMARK | |||
302 | tristate '"CONNMARK" target support' | 302 | tristate '"CONNMARK" target support' |
303 | depends on NETFILTER_XTABLES | 303 | depends on NETFILTER_XTABLES |
304 | depends on IP_NF_MANGLE || IP6_NF_MANGLE | 304 | depends on IP_NF_MANGLE || IP6_NF_MANGLE |
305 | depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK) | 305 | depends on IP_NF_CONNTRACK || NF_CONNTRACK |
306 | select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK | ||
307 | select NF_CONNTRACK_MARK if NF_CONNTRACK | ||
306 | help | 308 | help |
307 | This option adds a `CONNMARK' target, which allows one to manipulate | 309 | This option adds a `CONNMARK' target, which allows one to manipulate |
308 | the connection mark value. Similar to the MARK target, but | 310 | the connection mark value. Similar to the MARK target, but |
@@ -434,7 +436,9 @@ config NETFILTER_XT_MATCH_COMMENT | |||
434 | config NETFILTER_XT_MATCH_CONNBYTES | 436 | config NETFILTER_XT_MATCH_CONNBYTES |
435 | tristate '"connbytes" per-connection counter match support' | 437 | tristate '"connbytes" per-connection counter match support' |
436 | depends on NETFILTER_XTABLES | 438 | depends on NETFILTER_XTABLES |
437 | depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK) | 439 | depends on IP_NF_CONNTRACK || NF_CONNTRACK |
440 | select IP_NF_CT_ACCT if IP_NF_CONNTRACK | ||
441 | select NF_CT_ACCT if NF_CONNTRACK | ||
438 | help | 442 | help |
439 | This option adds a `connbytes' match, which allows you to match the | 443 | This option adds a `connbytes' match, which allows you to match the |
440 | number of bytes and/or packets for each direction within a connection. | 444 | number of bytes and/or packets for each direction within a connection. |
@@ -445,7 +449,9 @@ config NETFILTER_XT_MATCH_CONNBYTES | |||
445 | config NETFILTER_XT_MATCH_CONNMARK | 449 | config NETFILTER_XT_MATCH_CONNMARK |
446 | tristate '"connmark" connection mark match support' | 450 | tristate '"connmark" connection mark match support' |
447 | depends on NETFILTER_XTABLES | 451 | depends on NETFILTER_XTABLES |
448 | depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK) | 452 | depends on IP_NF_CONNTRACK || NF_CONNTRACK |
453 | select IP_NF_CONNTRACK_MARK if IP_NF_CONNTRACK | ||
454 | select NF_CONNTRACK_MARK if NF_CONNTRACK | ||
449 | help | 455 | help |
450 | This option adds a `connmark' match, which allows you to match the | 456 | This option adds a `connmark' match, which allows you to match the |
451 | connection mark value previously set for the session by `CONNMARK'. | 457 | connection mark value previously set for the session by `CONNMARK'. |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 291b8c6862f1..c3ebdbd917e9 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* netfilter.c: look after the filters for various protocols. | 1 | /* netfilter.c: look after the filters for various protocols. |
2 | * Heavily influenced by the old firewall.c by David Bonn and Alan Cox. | 2 | * Heavily influenced by the old firewall.c by David Bonn and Alan Cox. |
3 | * | 3 | * |
4 | * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any | 4 | * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any |
@@ -22,29 +22,34 @@ | |||
22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
23 | #include <linux/inetdevice.h> | 23 | #include <linux/inetdevice.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/mutex.h> | ||
25 | #include <net/sock.h> | 26 | #include <net/sock.h> |
26 | 27 | ||
27 | #include "nf_internals.h" | 28 | #include "nf_internals.h" |
28 | 29 | ||
29 | static DEFINE_SPINLOCK(afinfo_lock); | 30 | static DEFINE_MUTEX(afinfo_mutex); |
30 | 31 | ||
31 | struct nf_afinfo *nf_afinfo[NPROTO] __read_mostly; | 32 | struct nf_afinfo *nf_afinfo[NPROTO] __read_mostly; |
32 | EXPORT_SYMBOL(nf_afinfo); | 33 | EXPORT_SYMBOL(nf_afinfo); |
33 | 34 | ||
34 | int nf_register_afinfo(struct nf_afinfo *afinfo) | 35 | int nf_register_afinfo(struct nf_afinfo *afinfo) |
35 | { | 36 | { |
36 | spin_lock(&afinfo_lock); | 37 | int err; |
38 | |||
39 | err = mutex_lock_interruptible(&afinfo_mutex); | ||
40 | if (err < 0) | ||
41 | return err; | ||
37 | rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); | 42 | rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); |
38 | spin_unlock(&afinfo_lock); | 43 | mutex_unlock(&afinfo_mutex); |
39 | return 0; | 44 | return 0; |
40 | } | 45 | } |
41 | EXPORT_SYMBOL_GPL(nf_register_afinfo); | 46 | EXPORT_SYMBOL_GPL(nf_register_afinfo); |
42 | 47 | ||
43 | void nf_unregister_afinfo(struct nf_afinfo *afinfo) | 48 | void nf_unregister_afinfo(struct nf_afinfo *afinfo) |
44 | { | 49 | { |
45 | spin_lock(&afinfo_lock); | 50 | mutex_lock(&afinfo_mutex); |
46 | rcu_assign_pointer(nf_afinfo[afinfo->family], NULL); | 51 | rcu_assign_pointer(nf_afinfo[afinfo->family], NULL); |
47 | spin_unlock(&afinfo_lock); | 52 | mutex_unlock(&afinfo_mutex); |
48 | synchronize_rcu(); | 53 | synchronize_rcu(); |
49 | } | 54 | } |
50 | EXPORT_SYMBOL_GPL(nf_unregister_afinfo); | 55 | EXPORT_SYMBOL_GPL(nf_unregister_afinfo); |
@@ -56,30 +61,31 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo); | |||
56 | * packets come back: if the hook is gone, the packet is discarded. */ | 61 | * packets come back: if the hook is gone, the packet is discarded. */ |
57 | struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS] __read_mostly; | 62 | struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS] __read_mostly; |
58 | EXPORT_SYMBOL(nf_hooks); | 63 | EXPORT_SYMBOL(nf_hooks); |
59 | static DEFINE_SPINLOCK(nf_hook_lock); | 64 | static DEFINE_MUTEX(nf_hook_mutex); |
60 | 65 | ||
61 | int nf_register_hook(struct nf_hook_ops *reg) | 66 | int nf_register_hook(struct nf_hook_ops *reg) |
62 | { | 67 | { |
63 | struct list_head *i; | 68 | struct list_head *i; |
69 | int err; | ||
64 | 70 | ||
65 | spin_lock_bh(&nf_hook_lock); | 71 | err = mutex_lock_interruptible(&nf_hook_mutex); |
72 | if (err < 0) | ||
73 | return err; | ||
66 | list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) { | 74 | list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) { |
67 | if (reg->priority < ((struct nf_hook_ops *)i)->priority) | 75 | if (reg->priority < ((struct nf_hook_ops *)i)->priority) |
68 | break; | 76 | break; |
69 | } | 77 | } |
70 | list_add_rcu(®->list, i->prev); | 78 | list_add_rcu(®->list, i->prev); |
71 | spin_unlock_bh(&nf_hook_lock); | 79 | mutex_unlock(&nf_hook_mutex); |
72 | |||
73 | synchronize_net(); | ||
74 | return 0; | 80 | return 0; |
75 | } | 81 | } |
76 | EXPORT_SYMBOL(nf_register_hook); | 82 | EXPORT_SYMBOL(nf_register_hook); |
77 | 83 | ||
78 | void nf_unregister_hook(struct nf_hook_ops *reg) | 84 | void nf_unregister_hook(struct nf_hook_ops *reg) |
79 | { | 85 | { |
80 | spin_lock_bh(&nf_hook_lock); | 86 | mutex_lock(&nf_hook_mutex); |
81 | list_del_rcu(®->list); | 87 | list_del_rcu(®->list); |
82 | spin_unlock_bh(&nf_hook_lock); | 88 | mutex_unlock(&nf_hook_mutex); |
83 | 89 | ||
84 | synchronize_net(); | 90 | synchronize_net(); |
85 | } | 91 | } |
@@ -135,14 +141,14 @@ unsigned int nf_iterate(struct list_head *head, | |||
135 | continue; | 141 | continue; |
136 | 142 | ||
137 | /* Optimization: we don't need to hold module | 143 | /* Optimization: we don't need to hold module |
138 | reference here, since function can't sleep. --RR */ | 144 | reference here, since function can't sleep. --RR */ |
139 | verdict = elem->hook(hook, skb, indev, outdev, okfn); | 145 | verdict = elem->hook(hook, skb, indev, outdev, okfn); |
140 | if (verdict != NF_ACCEPT) { | 146 | if (verdict != NF_ACCEPT) { |
141 | #ifdef CONFIG_NETFILTER_DEBUG | 147 | #ifdef CONFIG_NETFILTER_DEBUG |
142 | if (unlikely((verdict & NF_VERDICT_MASK) | 148 | if (unlikely((verdict & NF_VERDICT_MASK) |
143 | > NF_MAX_VERDICT)) { | 149 | > NF_MAX_VERDICT)) { |
144 | NFDEBUG("Evil return from %p(%u).\n", | 150 | NFDEBUG("Evil return from %p(%u).\n", |
145 | elem->hook, hook); | 151 | elem->hook, hook); |
146 | continue; | 152 | continue; |
147 | } | 153 | } |
148 | #endif | 154 | #endif |
@@ -248,9 +254,12 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) | |||
248 | { | 254 | { |
249 | void (*attach)(struct sk_buff *, struct sk_buff *); | 255 | void (*attach)(struct sk_buff *, struct sk_buff *); |
250 | 256 | ||
251 | if (skb->nfct && (attach = ip_ct_attach) != NULL) { | 257 | if (skb->nfct) { |
252 | mb(); /* Just to be sure: must be read before executing this */ | 258 | rcu_read_lock(); |
253 | attach(new, skb); | 259 | attach = rcu_dereference(ip_ct_attach); |
260 | if (attach) | ||
261 | attach(new, skb); | ||
262 | rcu_read_unlock(); | ||
254 | } | 263 | } |
255 | } | 264 | } |
256 | EXPORT_SYMBOL(nf_ct_attach); | 265 | EXPORT_SYMBOL(nf_ct_attach); |
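Two related conversions land in net/netfilter/core.c: the registration paths trade spinlocks for a mutex, taken interruptibly on register so a signal can abort a blocked registration, and readers of the ip_ct_attach function pointer now pair rcu_dereference() with rcu_read_lock() instead of relying on a bare memory barrier. A condensed sketch of that writer/reader pairing, with placeholder names standing in for the netfilter symbols:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(hook_mutex);
static void (*attach_hook)(void *dst, void *src);	/* published via RCU */

int register_attach_hook(void (*fn)(void *, void *))
{
	int err = mutex_lock_interruptible(&hook_mutex);

	if (err < 0)
		return err;			/* interrupted by a signal */
	rcu_assign_pointer(attach_hook, fn);	/* publish only after fn is fully set up */
	mutex_unlock(&hook_mutex);
	return 0;
}

void unregister_attach_hook(void)
{
	mutex_lock(&hook_mutex);
	rcu_assign_pointer(attach_hook, NULL);
	mutex_unlock(&hook_mutex);
	synchronize_rcu();			/* wait out any in-flight readers */
}

void call_attach_hook(void *dst, void *src)
{
	void (*fn)(void *, void *);

	rcu_read_lock();
	fn = rcu_dereference(attach_hook);	/* paired with rcu_assign_pointer() */
	if (fn)
		fn(dst, src);			/* call while still inside the read section */
	rcu_read_unlock();
}

The mutex is only contended by registration and unregistration, which sleep rarely and never run in softirq context, so the packet path stays lock-free on the read side.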
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 9b02ec4012fb..32891ebc9e68 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -318,6 +318,7 @@ destroy_conntrack(struct nf_conntrack *nfct) | |||
318 | struct nf_conn_help *help = nfct_help(ct); | 318 | struct nf_conn_help *help = nfct_help(ct); |
319 | struct nf_conntrack_l3proto *l3proto; | 319 | struct nf_conntrack_l3proto *l3proto; |
320 | struct nf_conntrack_l4proto *l4proto; | 320 | struct nf_conntrack_l4proto *l4proto; |
321 | typeof(nf_conntrack_destroyed) destroyed; | ||
321 | 322 | ||
322 | DEBUGP("destroy_conntrack(%p)\n", ct); | 323 | DEBUGP("destroy_conntrack(%p)\n", ct); |
323 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); | 324 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); |
@@ -332,16 +333,21 @@ destroy_conntrack(struct nf_conntrack *nfct) | |||
332 | /* To make sure we don't get any weird locking issues here: | 333 | /* To make sure we don't get any weird locking issues here: |
333 | * destroy_conntrack() MUST NOT be called with a write lock | 334 | * destroy_conntrack() MUST NOT be called with a write lock |
334 | * to nf_conntrack_lock!!! -HW */ | 335 | * to nf_conntrack_lock!!! -HW */ |
336 | rcu_read_lock(); | ||
335 | l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num); | 337 | l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num); |
336 | if (l3proto && l3proto->destroy) | 338 | if (l3proto && l3proto->destroy) |
337 | l3proto->destroy(ct); | 339 | l3proto->destroy(ct); |
338 | 340 | ||
339 | l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); | 341 | l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, |
342 | ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); | ||
340 | if (l4proto && l4proto->destroy) | 343 | if (l4proto && l4proto->destroy) |
341 | l4proto->destroy(ct); | 344 | l4proto->destroy(ct); |
342 | 345 | ||
343 | if (nf_conntrack_destroyed) | 346 | destroyed = rcu_dereference(nf_conntrack_destroyed); |
344 | nf_conntrack_destroyed(ct); | 347 | if (destroyed) |
348 | destroyed(ct); | ||
349 | |||
350 | rcu_read_unlock(); | ||
345 | 351 | ||
346 | write_lock_bh(&nf_conntrack_lock); | 352 | write_lock_bh(&nf_conntrack_lock); |
347 | /* Expectations will have been removed in clean_from_lists, | 353 | /* Expectations will have been removed in clean_from_lists, |
@@ -418,7 +424,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_find_get); | |||
418 | 424 | ||
419 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, | 425 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, |
420 | unsigned int hash, | 426 | unsigned int hash, |
421 | unsigned int repl_hash) | 427 | unsigned int repl_hash) |
422 | { | 428 | { |
423 | ct->id = ++nf_conntrack_next_id; | 429 | ct->id = ++nf_conntrack_next_id; |
424 | list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list, | 430 | list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list, |
@@ -560,7 +566,7 @@ static int early_drop(struct list_head *chain) | |||
560 | if (del_timer(&ct->timeout)) { | 566 | if (del_timer(&ct->timeout)) { |
561 | death_by_timeout((unsigned long)ct); | 567 | death_by_timeout((unsigned long)ct); |
562 | dropped = 1; | 568 | dropped = 1; |
563 | NF_CT_STAT_INC(early_drop); | 569 | NF_CT_STAT_INC_ATOMIC(early_drop); |
564 | } | 570 | } |
565 | nf_ct_put(ct); | 571 | nf_ct_put(ct); |
566 | return dropped; | 572 | return dropped; |
@@ -647,9 +653,14 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | |||
647 | const struct nf_conntrack_tuple *repl) | 653 | const struct nf_conntrack_tuple *repl) |
648 | { | 654 | { |
649 | struct nf_conntrack_l3proto *l3proto; | 655 | struct nf_conntrack_l3proto *l3proto; |
656 | struct nf_conn *ct; | ||
650 | 657 | ||
658 | rcu_read_lock(); | ||
651 | l3proto = __nf_ct_l3proto_find(orig->src.l3num); | 659 | l3proto = __nf_ct_l3proto_find(orig->src.l3num); |
652 | return __nf_conntrack_alloc(orig, repl, l3proto, 0); | 660 | ct = __nf_conntrack_alloc(orig, repl, l3proto, 0); |
661 | rcu_read_unlock(); | ||
662 | |||
663 | return ct; | ||
653 | } | 664 | } |
654 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); | 665 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); |
655 | 666 | ||
@@ -813,11 +824,13 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
813 | 824 | ||
814 | /* Previously seen (loopback or untracked)? Ignore. */ | 825 | /* Previously seen (loopback or untracked)? Ignore. */ |
815 | if ((*pskb)->nfct) { | 826 | if ((*pskb)->nfct) { |
816 | NF_CT_STAT_INC(ignore); | 827 | NF_CT_STAT_INC_ATOMIC(ignore); |
817 | return NF_ACCEPT; | 828 | return NF_ACCEPT; |
818 | } | 829 | } |
819 | 830 | ||
831 | /* rcu_read_lock()ed by nf_hook_slow */ | ||
820 | l3proto = __nf_ct_l3proto_find((u_int16_t)pf); | 832 | l3proto = __nf_ct_l3proto_find((u_int16_t)pf); |
833 | |||
821 | if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) { | 834 | if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) { |
822 | DEBUGP("not prepared to track yet or error occured\n"); | 835 | DEBUGP("not prepared to track yet or error occured\n"); |
823 | return -ret; | 836 | return -ret; |
@@ -830,8 +843,8 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
830 | * core what to do with the packet. */ | 843 | * core what to do with the packet. */ |
831 | if (l4proto->error != NULL && | 844 | if (l4proto->error != NULL && |
832 | (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) { | 845 | (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) { |
833 | NF_CT_STAT_INC(error); | 846 | NF_CT_STAT_INC_ATOMIC(error); |
834 | NF_CT_STAT_INC(invalid); | 847 | NF_CT_STAT_INC_ATOMIC(invalid); |
835 | return -ret; | 848 | return -ret; |
836 | } | 849 | } |
837 | 850 | ||
@@ -839,13 +852,13 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
839 | &set_reply, &ctinfo); | 852 | &set_reply, &ctinfo); |
840 | if (!ct) { | 853 | if (!ct) { |
841 | /* Not valid part of a connection */ | 854 | /* Not valid part of a connection */ |
842 | NF_CT_STAT_INC(invalid); | 855 | NF_CT_STAT_INC_ATOMIC(invalid); |
843 | return NF_ACCEPT; | 856 | return NF_ACCEPT; |
844 | } | 857 | } |
845 | 858 | ||
846 | if (IS_ERR(ct)) { | 859 | if (IS_ERR(ct)) { |
847 | /* Too stressed to deal. */ | 860 | /* Too stressed to deal. */ |
848 | NF_CT_STAT_INC(drop); | 861 | NF_CT_STAT_INC_ATOMIC(drop); |
849 | return NF_DROP; | 862 | return NF_DROP; |
850 | } | 863 | } |
851 | 864 | ||
@@ -858,7 +871,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
858 | DEBUGP("nf_conntrack_in: Can't track with proto module\n"); | 871 | DEBUGP("nf_conntrack_in: Can't track with proto module\n"); |
859 | nf_conntrack_put((*pskb)->nfct); | 872 | nf_conntrack_put((*pskb)->nfct); |
860 | (*pskb)->nfct = NULL; | 873 | (*pskb)->nfct = NULL; |
861 | NF_CT_STAT_INC(invalid); | 874 | NF_CT_STAT_INC_ATOMIC(invalid); |
862 | return -ret; | 875 | return -ret; |
863 | } | 876 | } |
864 | 877 | ||
@@ -872,10 +885,15 @@ EXPORT_SYMBOL_GPL(nf_conntrack_in); | |||
872 | int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, | 885 | int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, |
873 | const struct nf_conntrack_tuple *orig) | 886 | const struct nf_conntrack_tuple *orig) |
874 | { | 887 | { |
875 | return nf_ct_invert_tuple(inverse, orig, | 888 | int ret; |
876 | __nf_ct_l3proto_find(orig->src.l3num), | 889 | |
877 | __nf_ct_l4proto_find(orig->src.l3num, | 890 | rcu_read_lock(); |
878 | orig->dst.protonum)); | 891 | ret = nf_ct_invert_tuple(inverse, orig, |
892 | __nf_ct_l3proto_find(orig->src.l3num), | ||
893 | __nf_ct_l4proto_find(orig->src.l3num, | ||
894 | orig->dst.protonum)); | ||
895 | rcu_read_unlock(); | ||
896 | return ret; | ||
879 | } | 897 | } |
880 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); | 898 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); |
881 | 899 | ||
@@ -1048,7 +1066,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data), | |||
1048 | if (iter(ct, data)) | 1066 | if (iter(ct, data)) |
1049 | goto found; | 1067 | goto found; |
1050 | } | 1068 | } |
1051 | } | 1069 | } |
1052 | list_for_each_entry(h, &unconfirmed, list) { | 1070 | list_for_each_entry(h, &unconfirmed, list) { |
1053 | ct = nf_ct_tuplehash_to_ctrack(h); | 1071 | ct = nf_ct_tuplehash_to_ctrack(h); |
1054 | if (iter(ct, data)) | 1072 | if (iter(ct, data)) |
@@ -1089,7 +1107,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size) | |||
1089 | if (vmalloced) | 1107 | if (vmalloced) |
1090 | vfree(hash); | 1108 | vfree(hash); |
1091 | else | 1109 | else |
1092 | free_pages((unsigned long)hash, | 1110 | free_pages((unsigned long)hash, |
1093 | get_order(sizeof(struct list_head) * size)); | 1111 | get_order(sizeof(struct list_head) * size)); |
1094 | } | 1112 | } |
1095 | 1113 | ||
@@ -1105,7 +1123,7 @@ void nf_conntrack_cleanup(void) | |||
1105 | { | 1123 | { |
1106 | int i; | 1124 | int i; |
1107 | 1125 | ||
1108 | ip_ct_attach = NULL; | 1126 | rcu_assign_pointer(ip_ct_attach, NULL); |
1109 | 1127 | ||
1110 | /* This makes sure all current packets have passed through | 1128 | /* This makes sure all current packets have passed through |
1111 | netfilter framework. Roll on, two-stage module | 1129 | netfilter framework. Roll on, two-stage module |
@@ -1150,18 +1168,18 @@ static struct list_head *alloc_hashtable(int size, int *vmalloced) | |||
1150 | struct list_head *hash; | 1168 | struct list_head *hash; |
1151 | unsigned int i; | 1169 | unsigned int i; |
1152 | 1170 | ||
1153 | *vmalloced = 0; | 1171 | *vmalloced = 0; |
1154 | hash = (void*)__get_free_pages(GFP_KERNEL, | 1172 | hash = (void*)__get_free_pages(GFP_KERNEL, |
1155 | get_order(sizeof(struct list_head) | 1173 | get_order(sizeof(struct list_head) |
1156 | * size)); | 1174 | * size)); |
1157 | if (!hash) { | 1175 | if (!hash) { |
1158 | *vmalloced = 1; | 1176 | *vmalloced = 1; |
1159 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); | 1177 | printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); |
1160 | hash = vmalloc(sizeof(struct list_head) * size); | 1178 | hash = vmalloc(sizeof(struct list_head) * size); |
1161 | } | 1179 | } |
1162 | 1180 | ||
1163 | if (hash) | 1181 | if (hash) |
1164 | for (i = 0; i < size; i++) | 1182 | for (i = 0; i < size; i++) |
1165 | INIT_LIST_HEAD(&hash[i]); | 1183 | INIT_LIST_HEAD(&hash[i]); |
1166 | 1184 | ||
1167 | return hash; | 1185 | return hash; |
@@ -1268,12 +1286,12 @@ int __init nf_conntrack_init(void) | |||
1268 | 1286 | ||
1269 | /* Don't NEED lock here, but good form anyway. */ | 1287 | /* Don't NEED lock here, but good form anyway. */ |
1270 | write_lock_bh(&nf_conntrack_lock); | 1288 | write_lock_bh(&nf_conntrack_lock); |
1271 | for (i = 0; i < AF_MAX; i++) | 1289 | for (i = 0; i < AF_MAX; i++) |
1272 | nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic; | 1290 | nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic; |
1273 | write_unlock_bh(&nf_conntrack_lock); | 1291 | write_unlock_bh(&nf_conntrack_lock); |
1274 | 1292 | ||
1275 | /* For use by REJECT target */ | 1293 | /* For use by REJECT target */ |
1276 | ip_ct_attach = __nf_conntrack_attach; | 1294 | rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach); |
1277 | 1295 | ||
1278 | /* Set up fake conntrack: | 1296 | /* Set up fake conntrack: |
1279 | - to never be deleted, not in any hashes */ | 1297 | - to never be deleted, not in any hashes */ |
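In nf_conntrack_core.c the protocol-table lookups in destroy_conntrack() and nf_conntrack_alloc() are now bracketed by rcu_read_lock()/rcu_read_unlock(), and the nf_conntrack_destroyed callback is snapshotted into a local declared with typeof() before being tested and called, so the read and the call both happen inside the same read-side section. A minimal sketch of that snapshot idiom, with a placeholder callback and connection type:

#include <linux/rcupdate.h>

struct tracked_conn;
void (*conn_destroyed_cb)(struct tracked_conn *ct);	/* writers update this with rcu_assign_pointer() */

static void destroy_tracked_conn(struct tracked_conn *ct)
{
	/* typeof() keeps the local's signature tied to the hook variable */
	typeof(conn_destroyed_cb) destroyed;

	rcu_read_lock();
	destroyed = rcu_dereference(conn_destroyed_cb);	/* snapshot once */
	if (destroyed)
		destroyed(ct);		/* still inside the read-side critical section */
	rcu_read_unlock();
}

Snapshotting once avoids the race where the pointer is non-NULL at the test but cleared before the call, which is exactly what the old "if (nf_conntrack_destroyed) nf_conntrack_destroyed(ct);" pattern could not guarantee.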
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 5cdcd7f4e813..ce70a6fc6bda 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -130,7 +130,7 @@ void nf_ct_remove_expectations(struct nf_conn *ct) | |||
130 | if (i->master == ct && del_timer(&i->timeout)) { | 130 | if (i->master == ct && del_timer(&i->timeout)) { |
131 | nf_ct_unlink_expect(i); | 131 | nf_ct_unlink_expect(i); |
132 | nf_conntrack_expect_put(i); | 132 | nf_conntrack_expect_put(i); |
133 | } | 133 | } |
134 | } | 134 | } |
135 | } | 135 | } |
136 | EXPORT_SYMBOL_GPL(nf_ct_remove_expectations); | 136 | EXPORT_SYMBOL_GPL(nf_ct_remove_expectations); |
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 92a947168761..3089dfc40c88 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
@@ -126,7 +126,7 @@ get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | static int try_number(const char *data, size_t dlen, u_int32_t array[], | 128 | static int try_number(const char *data, size_t dlen, u_int32_t array[], |
129 | int array_size, char sep, char term) | 129 | int array_size, char sep, char term) |
130 | { | 130 | { |
131 | u_int32_t i, len; | 131 | u_int32_t i, len; |
132 | 132 | ||
@@ -413,8 +413,8 @@ static int help(struct sk_buff **pskb, | |||
413 | goto out_update_nl; | 413 | goto out_update_nl; |
414 | } | 414 | } |
415 | 415 | ||
416 | /* Initialize IP/IPv6 addr to expected address (it's not mentioned | 416 | /* Initialize IP/IPv6 addr to expected address (it's not mentioned |
417 | in EPSV responses) */ | 417 | in EPSV responses) */ |
418 | cmd.l3num = ct->tuplehash[dir].tuple.src.l3num; | 418 | cmd.l3num = ct->tuplehash[dir].tuple.src.l3num; |
419 | memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, | 419 | memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, |
420 | sizeof(cmd.u3.all)); | 420 | sizeof(cmd.u3.all)); |
@@ -466,11 +466,11 @@ static int help(struct sk_buff **pskb, | |||
466 | memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, | 466 | memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, |
467 | sizeof(cmd.u3.all))) { | 467 | sizeof(cmd.u3.all))) { |
468 | /* Enrico Scholz's passive FTP to partially RNAT'd ftp | 468 | /* Enrico Scholz's passive FTP to partially RNAT'd ftp |
469 | server: it really wants us to connect to a | 469 | server: it really wants us to connect to a |
470 | different IP address. Simply don't record it for | 470 | different IP address. Simply don't record it for |
471 | NAT. */ | 471 | NAT. */ |
472 | if (cmd.l3num == PF_INET) { | 472 | if (cmd.l3num == PF_INET) { |
473 | DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n", | 473 | DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n", |
474 | NIPQUAD(cmd.u3.ip), | 474 | NIPQUAD(cmd.u3.ip), |
475 | NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip)); | 475 | NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip)); |
476 | } else { | 476 | } else { |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 6d8568959f82..b284db73ca7c 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -49,7 +49,7 @@ MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); | |||
49 | static int callforward_filter __read_mostly = 1; | 49 | static int callforward_filter __read_mostly = 1; |
50 | module_param(callforward_filter, bool, 0600); | 50 | module_param(callforward_filter, bool, 0600); |
51 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " | 51 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " |
52 | "if both endpoints are on different sides " | 52 | "if both endpoints are on different sides " |
53 | "(determined by routing information)"); | 53 | "(determined by routing information)"); |
54 | 54 | ||
55 | /* Hooks for NAT */ | 55 | /* Hooks for NAT */ |
@@ -300,7 +300,7 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | |||
300 | IPPROTO_UDP, NULL, &rtcp_port); | 300 | IPPROTO_UDP, NULL, &rtcp_port); |
301 | 301 | ||
302 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, | 302 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, |
303 | &ct->tuplehash[!dir].tuple.dst.u3, | 303 | &ct->tuplehash[!dir].tuple.dst.u3, |
304 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && | 304 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && |
305 | (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) && | 305 | (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) && |
306 | ct->status & IPS_NAT_MASK) { | 306 | ct->status & IPS_NAT_MASK) { |
@@ -743,7 +743,7 @@ static int callforward_do_filter(union nf_conntrack_address *src, | |||
743 | rt2 = (struct rt6_info *)ip6_route_output(NULL, &fl2); | 743 | rt2 = (struct rt6_info *)ip6_route_output(NULL, &fl2); |
744 | if (rt2) { | 744 | if (rt2) { |
745 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, | 745 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, |
746 | sizeof(rt1->rt6i_gateway)) && | 746 | sizeof(rt1->rt6i_gateway)) && |
747 | rt1->u.dst.dev == rt2->u.dst.dev) | 747 | rt1->u.dst.dev == rt2->u.dst.dev) |
748 | ret = 1; | 748 | ret = 1; |
749 | dst_release(&rt2->u.dst); | 749 | dst_release(&rt2->u.dst); |
@@ -780,7 +780,7 @@ static int expect_callforwarding(struct sk_buff **pskb, | |||
780 | * we don't need to track the second call */ | 780 | * we don't need to track the second call */ |
781 | if (callforward_filter && | 781 | if (callforward_filter && |
782 | callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, | 782 | callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, |
783 | ct->tuplehash[!dir].tuple.src.l3num)) { | 783 | ct->tuplehash[!dir].tuple.src.l3num)) { |
784 | DEBUGP("nf_ct_q931: Call Forwarding not tracked\n"); | 784 | DEBUGP("nf_ct_q931: Call Forwarding not tracked\n"); |
785 | return 0; | 785 | return 0; |
786 | } | 786 | } |
@@ -840,7 +840,7 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct, | |||
840 | if ((setup->options & eSetup_UUIE_destCallSignalAddress) && | 840 | if ((setup->options & eSetup_UUIE_destCallSignalAddress) && |
841 | (set_h225_addr) && ct->status && IPS_NAT_MASK && | 841 | (set_h225_addr) && ct->status && IPS_NAT_MASK && |
842 | get_h225_addr(ct, *data, &setup->destCallSignalAddress, | 842 | get_h225_addr(ct, *data, &setup->destCallSignalAddress, |
843 | &addr, &port) && | 843 | &addr, &port) && |
844 | memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { | 844 | memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { |
845 | DEBUGP("nf_ct_q931: set destCallSignalAddress " | 845 | DEBUGP("nf_ct_q931: set destCallSignalAddress " |
846 | NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", | 846 | NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", |
@@ -858,7 +858,7 @@ static int process_setup(struct sk_buff **pskb, struct nf_conn *ct, | |||
858 | if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && | 858 | if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && |
859 | (set_h225_addr) && ct->status & IPS_NAT_MASK && | 859 | (set_h225_addr) && ct->status & IPS_NAT_MASK && |
860 | get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, | 860 | get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, |
861 | &addr, &port) && | 861 | &addr, &port) && |
862 | memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { | 862 | memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { |
863 | DEBUGP("nf_ct_q931: set sourceCallSignalAddress " | 863 | DEBUGP("nf_ct_q931: set sourceCallSignalAddress " |
864 | NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", | 864 | NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", |
@@ -1282,7 +1282,7 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct, | |||
1282 | for (i = 0; i < count; i++) { | 1282 | for (i = 0; i < count; i++) { |
1283 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && | 1283 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && |
1284 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, | 1284 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, |
1285 | sizeof(addr)) == 0 && port != 0) | 1285 | sizeof(addr)) == 0 && port != 0) |
1286 | break; | 1286 | break; |
1287 | } | 1287 | } |
1288 | 1288 | ||
@@ -1294,7 +1294,7 @@ static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct, | |||
1294 | return -1; | 1294 | return -1; |
1295 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | 1295 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, |
1296 | gkrouted_only ? /* only accept calls from GK? */ | 1296 | gkrouted_only ? /* only accept calls from GK? */ |
1297 | &ct->tuplehash[!dir].tuple.src.u3 : | 1297 | &ct->tuplehash[!dir].tuple.src.u3 : |
1298 | NULL, | 1298 | NULL, |
1299 | &ct->tuplehash[!dir].tuple.dst.u3, | 1299 | &ct->tuplehash[!dir].tuple.dst.u3, |
1300 | IPPROTO_TCP, NULL, &port); | 1300 | IPPROTO_TCP, NULL, &port); |
@@ -1513,7 +1513,7 @@ static int process_arq(struct sk_buff **pskb, struct nf_conn *ct, | |||
1513 | set_h225_addr = rcu_dereference(set_h225_addr_hook); | 1513 | set_h225_addr = rcu_dereference(set_h225_addr_hook); |
1514 | if ((arq->options & eAdmissionRequest_destCallSignalAddress) && | 1514 | if ((arq->options & eAdmissionRequest_destCallSignalAddress) && |
1515 | get_h225_addr(ct, *data, &arq->destCallSignalAddress, | 1515 | get_h225_addr(ct, *data, &arq->destCallSignalAddress, |
1516 | &addr, &port) && | 1516 | &addr, &port) && |
1517 | !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && | 1517 | !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && |
1518 | port == info->sig_port[dir] && | 1518 | port == info->sig_port[dir] && |
1519 | set_h225_addr && ct->status & IPS_NAT_MASK) { | 1519 | set_h225_addr && ct->status & IPS_NAT_MASK) { |
@@ -1526,7 +1526,7 @@ static int process_arq(struct sk_buff **pskb, struct nf_conn *ct, | |||
1526 | 1526 | ||
1527 | if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && | 1527 | if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && |
1528 | get_h225_addr(ct, *data, &arq->srcCallSignalAddress, | 1528 | get_h225_addr(ct, *data, &arq->srcCallSignalAddress, |
1529 | &addr, &port) && | 1529 | &addr, &port) && |
1530 | !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && | 1530 | !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && |
1531 | set_h225_addr && ct->status & IPS_NAT_MASK) { | 1531 | set_h225_addr && ct->status & IPS_NAT_MASK) { |
1532 | /* Calling ARQ */ | 1532 | /* Calling ARQ */ |
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index ed01db634399..43ccd0e2e8ae 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c | |||
@@ -57,7 +57,7 @@ static const char *dccprotos[] = { | |||
57 | 57 | ||
58 | #if 0 | 58 | #if 0 |
59 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \ | 59 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \ |
60 | __FILE__, __FUNCTION__ , ## args) | 60 | __FILE__, __FUNCTION__ , ## args) |
61 | #else | 61 | #else |
62 | #define DEBUGP(format, args...) | 62 | #define DEBUGP(format, args...) |
63 | #endif | 63 | #endif |
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index a3d31c3ac8e6..cbd96f3c1b89 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c | |||
@@ -77,7 +77,7 @@ generic_prepare(struct sk_buff **pskb, unsigned int hooknum, | |||
77 | 77 | ||
78 | 78 | ||
79 | static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple) | 79 | static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple) |
80 | 80 | ||
81 | { | 81 | { |
82 | return NF_CT_F_BASIC; | 82 | return NF_CT_F_BASIC; |
83 | } | 83 | } |
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 2a48efdf0d67..bb26a658cc1c 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -43,7 +43,7 @@ module_param(timeout, uint, 0400); | |||
43 | MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); | 43 | MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); |
44 | 44 | ||
45 | static int help(struct sk_buff **pskb, unsigned int protoff, | 45 | static int help(struct sk_buff **pskb, unsigned int protoff, |
46 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | 46 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) |
47 | { | 47 | { |
48 | struct nf_conntrack_expect *exp; | 48 | struct nf_conntrack_expect *exp; |
49 | struct iphdr *iph = (*pskb)->nh.iph; | 49 | struct iphdr *iph = (*pskb)->nh.iph; |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c64f029f7052..48f05314ebf7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -6,10 +6,10 @@ | |||
6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> | 6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> |
7 | * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net> | 7 | * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net> |
8 | * | 8 | * |
9 | * I've reworked this stuff to use attributes instead of conntrack | 9 | * I've reworked this stuff to use attributes instead of conntrack |
10 | * structures. 5.44 am. I need more tea. --pablo 05/07/11. | 10 | * structures. 5.44 am. I need more tea. --pablo 05/07/11. |
11 | * | 11 | * |
12 | * Initial connection tracking via netlink development funded and | 12 | * Initial connection tracking via netlink development funded and |
13 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 13 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
14 | * | 14 | * |
15 | * Further development of this code funded by Astaro AG (http://www.astaro.com) | 15 | * Further development of this code funded by Astaro AG (http://www.astaro.com) |
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL"); | |||
53 | static char __initdata version[] = "0.93"; | 53 | static char __initdata version[] = "0.93"; |
54 | 54 | ||
55 | static inline int | 55 | static inline int |
56 | ctnetlink_dump_tuples_proto(struct sk_buff *skb, | 56 | ctnetlink_dump_tuples_proto(struct sk_buff *skb, |
57 | const struct nf_conntrack_tuple *tuple, | 57 | const struct nf_conntrack_tuple *tuple, |
58 | struct nf_conntrack_l4proto *l4proto) | 58 | struct nf_conntrack_l4proto *l4proto) |
59 | { | 59 | { |
@@ -64,7 +64,7 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb, | |||
64 | 64 | ||
65 | if (likely(l4proto->tuple_to_nfattr)) | 65 | if (likely(l4proto->tuple_to_nfattr)) |
66 | ret = l4proto->tuple_to_nfattr(skb, tuple); | 66 | ret = l4proto->tuple_to_nfattr(skb, tuple); |
67 | 67 | ||
68 | NFA_NEST_END(skb, nest_parms); | 68 | NFA_NEST_END(skb, nest_parms); |
69 | 69 | ||
70 | return ret; | 70 | return ret; |
@@ -135,7 +135,7 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) | |||
135 | timeout = 0; | 135 | timeout = 0; |
136 | else | 136 | else |
137 | timeout = htonl(timeout_l / HZ); | 137 | timeout = htonl(timeout_l / HZ); |
138 | 138 | ||
139 | NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); | 139 | NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); |
140 | return 0; | 140 | return 0; |
141 | 141 | ||
@@ -154,7 +154,7 @@ ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) | |||
154 | nf_ct_l4proto_put(l4proto); | 154 | nf_ct_l4proto_put(l4proto); |
155 | return 0; | 155 | return 0; |
156 | } | 156 | } |
157 | 157 | ||
158 | nest_proto = NFA_NEST(skb, CTA_PROTOINFO); | 158 | nest_proto = NFA_NEST(skb, CTA_PROTOINFO); |
159 | 159 | ||
160 | ret = l4proto->to_nfattr(skb, nest_proto, ct); | 160 | ret = l4proto->to_nfattr(skb, nest_proto, ct); |
@@ -178,7 +178,7 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) | |||
178 | 178 | ||
179 | if (!help || !help->helper) | 179 | if (!help || !help->helper) |
180 | return 0; | 180 | return 0; |
181 | 181 | ||
182 | nest_helper = NFA_NEST(skb, CTA_HELP); | 182 | nest_helper = NFA_NEST(skb, CTA_HELP); |
183 | NFA_PUT(skb, CTA_HELP_NAME, strlen(help->helper->name), help->helper->name); | 183 | NFA_PUT(skb, CTA_HELP_NAME, strlen(help->helper->name), help->helper->name); |
184 | 184 | ||
@@ -250,7 +250,7 @@ static inline int | |||
250 | ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) | 250 | ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) |
251 | { | 251 | { |
252 | __be32 use = htonl(atomic_read(&ct->ct_general.use)); | 252 | __be32 use = htonl(atomic_read(&ct->ct_general.use)); |
253 | 253 | ||
254 | NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); | 254 | NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); |
255 | return 0; | 255 | return 0; |
256 | 256 | ||
@@ -262,7 +262,7 @@ nfattr_failure: | |||
262 | 262 | ||
263 | static int | 263 | static int |
264 | ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | 264 | ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, |
265 | int event, int nowait, | 265 | int event, int nowait, |
266 | const struct nf_conn *ct) | 266 | const struct nf_conn *ct) |
267 | { | 267 | { |
268 | struct nlmsghdr *nlh; | 268 | struct nlmsghdr *nlh; |
@@ -277,7 +277,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
277 | nfmsg = NLMSG_DATA(nlh); | 277 | nfmsg = NLMSG_DATA(nlh); |
278 | 278 | ||
279 | nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; | 279 | nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0; |
280 | nfmsg->nfgen_family = | 280 | nfmsg->nfgen_family = |
281 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | 281 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; |
282 | nfmsg->version = NFNETLINK_V0; | 282 | nfmsg->version = NFNETLINK_V0; |
283 | nfmsg->res_id = 0; | 283 | nfmsg->res_id = 0; |
@@ -286,7 +286,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
286 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) | 286 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) |
287 | goto nfattr_failure; | 287 | goto nfattr_failure; |
288 | NFA_NEST_END(skb, nest_parms); | 288 | NFA_NEST_END(skb, nest_parms); |
289 | 289 | ||
290 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); | 290 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); |
291 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) | 291 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) |
292 | goto nfattr_failure; | 292 | goto nfattr_failure; |
@@ -314,7 +314,7 @@ nfattr_failure: | |||
314 | 314 | ||
315 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | 315 | #ifdef CONFIG_NF_CONNTRACK_EVENTS |
316 | static int ctnetlink_conntrack_event(struct notifier_block *this, | 316 | static int ctnetlink_conntrack_event(struct notifier_block *this, |
317 | unsigned long events, void *ptr) | 317 | unsigned long events, void *ptr) |
318 | { | 318 | { |
319 | struct nlmsghdr *nlh; | 319 | struct nlmsghdr *nlh; |
320 | struct nfgenmsg *nfmsg; | 320 | struct nfgenmsg *nfmsg; |
@@ -364,7 +364,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
364 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) | 364 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) |
365 | goto nfattr_failure; | 365 | goto nfattr_failure; |
366 | NFA_NEST_END(skb, nest_parms); | 366 | NFA_NEST_END(skb, nest_parms); |
367 | 367 | ||
368 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); | 368 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); |
369 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) | 369 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) |
370 | goto nfattr_failure; | 370 | goto nfattr_failure; |
@@ -383,16 +383,16 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
383 | 383 | ||
384 | if (events & IPCT_PROTOINFO | 384 | if (events & IPCT_PROTOINFO |
385 | && ctnetlink_dump_protoinfo(skb, ct) < 0) | 385 | && ctnetlink_dump_protoinfo(skb, ct) < 0) |
386 | goto nfattr_failure; | 386 | goto nfattr_failure; |
387 | 387 | ||
388 | if ((events & IPCT_HELPER || nfct_help(ct)) | 388 | if ((events & IPCT_HELPER || nfct_help(ct)) |
389 | && ctnetlink_dump_helpinfo(skb, ct) < 0) | 389 | && ctnetlink_dump_helpinfo(skb, ct) < 0) |
390 | goto nfattr_failure; | 390 | goto nfattr_failure; |
391 | 391 | ||
392 | #ifdef CONFIG_NF_CONNTRACK_MARK | 392 | #ifdef CONFIG_NF_CONNTRACK_MARK |
393 | if ((events & IPCT_MARK || ct->mark) | 393 | if ((events & IPCT_MARK || ct->mark) |
394 | && ctnetlink_dump_mark(skb, ct) < 0) | 394 | && ctnetlink_dump_mark(skb, ct) < 0) |
395 | goto nfattr_failure; | 395 | goto nfattr_failure; |
396 | #endif | 396 | #endif |
397 | 397 | ||
398 | if (events & IPCT_COUNTER_FILLING && | 398 | if (events & IPCT_COUNTER_FILLING && |
@@ -450,7 +450,7 @@ restart: | |||
450 | cb->args[1] = 0; | 450 | cb->args[1] = 0; |
451 | } | 451 | } |
452 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, | 452 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, |
453 | cb->nlh->nlmsg_seq, | 453 | cb->nlh->nlmsg_seq, |
454 | IPCTNL_MSG_CT_NEW, | 454 | IPCTNL_MSG_CT_NEW, |
455 | 1, ct) < 0) { | 455 | 1, ct) < 0) { |
456 | nf_conntrack_get(&ct->ct_general); | 456 | nf_conntrack_get(&ct->ct_general); |
@@ -500,7 +500,7 @@ static const size_t cta_min_proto[CTA_PROTO_MAX] = { | |||
500 | }; | 500 | }; |
501 | 501 | ||
502 | static inline int | 502 | static inline int |
503 | ctnetlink_parse_tuple_proto(struct nfattr *attr, | 503 | ctnetlink_parse_tuple_proto(struct nfattr *attr, |
504 | struct nf_conntrack_tuple *tuple) | 504 | struct nf_conntrack_tuple *tuple) |
505 | { | 505 | { |
506 | struct nfattr *tb[CTA_PROTO_MAX]; | 506 | struct nfattr *tb[CTA_PROTO_MAX]; |
@@ -522,7 +522,7 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr, | |||
522 | ret = l4proto->nfattr_to_tuple(tb, tuple); | 522 | ret = l4proto->nfattr_to_tuple(tb, tuple); |
523 | 523 | ||
524 | nf_ct_l4proto_put(l4proto); | 524 | nf_ct_l4proto_put(l4proto); |
525 | 525 | ||
526 | return ret; | 526 | return ret; |
527 | } | 527 | } |
528 | 528 | ||
@@ -609,7 +609,7 @@ nfnetlink_parse_nat(struct nfattr *nat, | |||
609 | int err; | 609 | int err; |
610 | 610 | ||
611 | memset(range, 0, sizeof(*range)); | 611 | memset(range, 0, sizeof(*range)); |
612 | 612 | ||
613 | nfattr_parse_nested(tb, CTA_NAT_MAX, nat); | 613 | nfattr_parse_nested(tb, CTA_NAT_MAX, nat); |
614 | 614 | ||
615 | if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat)) | 615 | if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat)) |
@@ -661,7 +661,7 @@ static const size_t cta_min[CTA_MAX] = { | |||
661 | }; | 661 | }; |
662 | 662 | ||
663 | static int | 663 | static int |
664 | ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | 664 | ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, |
665 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 665 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
666 | { | 666 | { |
667 | struct nf_conntrack_tuple_hash *h; | 667 | struct nf_conntrack_tuple_hash *h; |
@@ -692,14 +692,14 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
692 | return -ENOENT; | 692 | return -ENOENT; |
693 | 693 | ||
694 | ct = nf_ct_tuplehash_to_ctrack(h); | 694 | ct = nf_ct_tuplehash_to_ctrack(h); |
695 | 695 | ||
696 | if (cda[CTA_ID-1]) { | 696 | if (cda[CTA_ID-1]) { |
697 | u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); | 697 | u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); |
698 | if (ct->id != id) { | 698 | if (ct->id != id) { |
699 | nf_ct_put(ct); | 699 | nf_ct_put(ct); |
700 | return -ENOENT; | 700 | return -ENOENT; |
701 | } | 701 | } |
702 | } | 702 | } |
703 | if (del_timer(&ct->timeout)) | 703 | if (del_timer(&ct->timeout)) |
704 | ct->timeout.function((unsigned long)ct); | 704 | ct->timeout.function((unsigned long)ct); |
705 | 705 | ||
@@ -709,7 +709,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
709 | } | 709 | } |
710 | 710 | ||
711 | static int | 711 | static int |
712 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | 712 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, |
713 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 713 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
714 | { | 714 | { |
715 | struct nf_conntrack_tuple_hash *h; | 715 | struct nf_conntrack_tuple_hash *h; |
@@ -765,7 +765,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
765 | return -ENOMEM; | 765 | return -ENOMEM; |
766 | } | 766 | } |
767 | 767 | ||
768 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, | 768 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, |
769 | IPCTNL_MSG_CT_NEW, 1, ct); | 769 | IPCTNL_MSG_CT_NEW, 1, ct); |
770 | nf_ct_put(ct); | 770 | nf_ct_put(ct); |
771 | if (err <= 0) | 771 | if (err <= 0) |
@@ -793,12 +793,12 @@ ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) | |||
793 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) | 793 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) |
794 | /* unchangeable */ | 794 | /* unchangeable */ |
795 | return -EINVAL; | 795 | return -EINVAL; |
796 | 796 | ||
797 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) | 797 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) |
798 | /* SEEN_REPLY bit can only be set */ | 798 | /* SEEN_REPLY bit can only be set */ |
799 | return -EINVAL; | 799 | return -EINVAL; |
800 | 800 | ||
801 | 801 | ||
802 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) | 802 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) |
803 | /* ASSURED bit can only be set */ | 803 | /* ASSURED bit can only be set */ |
804 | return -EINVAL; | 804 | return -EINVAL; |
@@ -877,7 +877,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[]) | |||
877 | memset(&help->help, 0, sizeof(help->help)); | 877 | memset(&help->help, 0, sizeof(help->help)); |
878 | } | 878 | } |
879 | } | 879 | } |
880 | 880 | ||
881 | help->helper = helper; | 881 | help->helper = helper; |
882 | 882 | ||
883 | return 0; | 883 | return 0; |
@@ -887,7 +887,7 @@ static inline int | |||
887 | ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[]) | 887 | ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[]) |
888 | { | 888 | { |
889 | u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); | 889 | u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); |
890 | 890 | ||
891 | if (!del_timer(&ct->timeout)) | 891 | if (!del_timer(&ct->timeout)) |
892 | return -ETIME; | 892 | return -ETIME; |
893 | 893 | ||
@@ -955,7 +955,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[]) | |||
955 | } | 955 | } |
956 | 956 | ||
957 | static int | 957 | static int |
958 | ctnetlink_create_conntrack(struct nfattr *cda[], | 958 | ctnetlink_create_conntrack(struct nfattr *cda[], |
959 | struct nf_conntrack_tuple *otuple, | 959 | struct nf_conntrack_tuple *otuple, |
960 | struct nf_conntrack_tuple *rtuple) | 960 | struct nf_conntrack_tuple *rtuple) |
961 | { | 961 | { |
@@ -965,7 +965,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
965 | 965 | ||
966 | ct = nf_conntrack_alloc(otuple, rtuple); | 966 | ct = nf_conntrack_alloc(otuple, rtuple); |
967 | if (ct == NULL || IS_ERR(ct)) | 967 | if (ct == NULL || IS_ERR(ct)) |
968 | return -ENOMEM; | 968 | return -ENOMEM; |
969 | 969 | ||
970 | if (!cda[CTA_TIMEOUT-1]) | 970 | if (!cda[CTA_TIMEOUT-1]) |
971 | goto err; | 971 | goto err; |
@@ -1003,13 +1003,13 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
1003 | 1003 | ||
1004 | return 0; | 1004 | return 0; |
1005 | 1005 | ||
1006 | err: | 1006 | err: |
1007 | nf_conntrack_free(ct); | 1007 | nf_conntrack_free(ct); |
1008 | return err; | 1008 | return err; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | static int | 1011 | static int |
1012 | ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | 1012 | ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, |
1013 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 1013 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
1014 | { | 1014 | { |
1015 | struct nf_conntrack_tuple otuple, rtuple; | 1015 | struct nf_conntrack_tuple otuple, rtuple; |
@@ -1065,9 +1065,9 @@ out_unlock: | |||
1065 | return err; | 1065 | return err; |
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | /*********************************************************************** | 1068 | /*********************************************************************** |
1069 | * EXPECT | 1069 | * EXPECT |
1070 | ***********************************************************************/ | 1070 | ***********************************************************************/ |
1071 | 1071 | ||
1072 | static inline int | 1072 | static inline int |
1073 | ctnetlink_exp_dump_tuple(struct sk_buff *skb, | 1073 | ctnetlink_exp_dump_tuple(struct sk_buff *skb, |
@@ -1075,7 +1075,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb, | |||
1075 | enum ctattr_expect type) | 1075 | enum ctattr_expect type) |
1076 | { | 1076 | { |
1077 | struct nfattr *nest_parms = NFA_NEST(skb, type); | 1077 | struct nfattr *nest_parms = NFA_NEST(skb, type); |
1078 | 1078 | ||
1079 | if (ctnetlink_dump_tuples(skb, tuple) < 0) | 1079 | if (ctnetlink_dump_tuples(skb, tuple) < 0) |
1080 | goto nfattr_failure; | 1080 | goto nfattr_failure; |
1081 | 1081 | ||
@@ -1085,7 +1085,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb, | |||
1085 | 1085 | ||
1086 | nfattr_failure: | 1086 | nfattr_failure: |
1087 | return -1; | 1087 | return -1; |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | static inline int | 1090 | static inline int |
1091 | ctnetlink_exp_dump_mask(struct sk_buff *skb, | 1091 | ctnetlink_exp_dump_mask(struct sk_buff *skb, |
@@ -1120,7 +1120,7 @@ nfattr_failure: | |||
1120 | 1120 | ||
1121 | static inline int | 1121 | static inline int |
1122 | ctnetlink_exp_dump_expect(struct sk_buff *skb, | 1122 | ctnetlink_exp_dump_expect(struct sk_buff *skb, |
1123 | const struct nf_conntrack_expect *exp) | 1123 | const struct nf_conntrack_expect *exp) |
1124 | { | 1124 | { |
1125 | struct nf_conn *master = exp->master; | 1125 | struct nf_conn *master = exp->master; |
1126 | __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); | 1126 | __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); |
@@ -1134,20 +1134,20 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, | |||
1134 | &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 1134 | &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
1135 | CTA_EXPECT_MASTER) < 0) | 1135 | CTA_EXPECT_MASTER) < 0) |
1136 | goto nfattr_failure; | 1136 | goto nfattr_failure; |
1137 | 1137 | ||
1138 | NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(timeout), &timeout); | 1138 | NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(timeout), &timeout); |
1139 | NFA_PUT(skb, CTA_EXPECT_ID, sizeof(u_int32_t), &id); | 1139 | NFA_PUT(skb, CTA_EXPECT_ID, sizeof(u_int32_t), &id); |
1140 | 1140 | ||
1141 | return 0; | 1141 | return 0; |
1142 | 1142 | ||
1143 | nfattr_failure: | 1143 | nfattr_failure: |
1144 | return -1; | 1144 | return -1; |
1145 | } | 1145 | } |
1146 | 1146 | ||
1147 | static int | 1147 | static int |
1148 | ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | 1148 | ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, |
1149 | int event, | 1149 | int event, |
1150 | int nowait, | 1150 | int nowait, |
1151 | const struct nf_conntrack_expect *exp) | 1151 | const struct nf_conntrack_expect *exp) |
1152 | { | 1152 | { |
1153 | struct nlmsghdr *nlh; | 1153 | struct nlmsghdr *nlh; |
@@ -1250,7 +1250,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
1250 | goto out; | 1250 | goto out; |
1251 | *id = exp->id; | 1251 | *id = exp->id; |
1252 | } | 1252 | } |
1253 | out: | 1253 | out: |
1254 | read_unlock_bh(&nf_conntrack_lock); | 1254 | read_unlock_bh(&nf_conntrack_lock); |
1255 | 1255 | ||
1256 | return skb->len; | 1256 | return skb->len; |
@@ -1262,7 +1262,7 @@ static const size_t cta_min_exp[CTA_EXPECT_MAX] = { | |||
1262 | }; | 1262 | }; |
1263 | 1263 | ||
1264 | static int | 1264 | static int |
1265 | ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | 1265 | ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, |
1266 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 1266 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
1267 | { | 1267 | { |
1268 | struct nf_conntrack_tuple tuple; | 1268 | struct nf_conntrack_tuple tuple; |
@@ -1279,7 +1279,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1279 | u32 rlen; | 1279 | u32 rlen; |
1280 | 1280 | ||
1281 | if ((*errp = netlink_dump_start(ctnl, skb, nlh, | 1281 | if ((*errp = netlink_dump_start(ctnl, skb, nlh, |
1282 | ctnetlink_exp_dump_table, | 1282 | ctnetlink_exp_dump_table, |
1283 | ctnetlink_done)) != 0) | 1283 | ctnetlink_done)) != 0) |
1284 | return -EINVAL; | 1284 | return -EINVAL; |
1285 | rlen = NLMSG_ALIGN(nlh->nlmsg_len); | 1285 | rlen = NLMSG_ALIGN(nlh->nlmsg_len); |
@@ -1307,14 +1307,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1307 | nf_conntrack_expect_put(exp); | 1307 | nf_conntrack_expect_put(exp); |
1308 | return -ENOENT; | 1308 | return -ENOENT; |
1309 | } | 1309 | } |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | err = -ENOMEM; | 1312 | err = -ENOMEM; |
1313 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 1313 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
1314 | if (!skb2) | 1314 | if (!skb2) |
1315 | goto out; | 1315 | goto out; |
1316 | 1316 | ||
1317 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, | 1317 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, |
1318 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, | 1318 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, |
1319 | 1, exp); | 1319 | 1, exp); |
1320 | if (err <= 0) | 1320 | if (err <= 0) |
@@ -1332,7 +1332,7 @@ out: | |||
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | static int | 1334 | static int |
1335 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | 1335 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, |
1336 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 1336 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
1337 | { | 1337 | { |
1338 | struct nf_conntrack_expect *exp, *tmp; | 1338 | struct nf_conntrack_expect *exp, *tmp; |
@@ -1366,7 +1366,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1366 | 1366 | ||
1367 | /* after list removal, usage count == 1 */ | 1367 | /* after list removal, usage count == 1 */ |
1368 | nf_conntrack_unexpect_related(exp); | 1368 | nf_conntrack_unexpect_related(exp); |
1369 | /* have to put what we 'get' above. | 1369 | /* have to put what we 'get' above. |
1370 | * after this line usage count == 0 */ | 1370 | * after this line usage count == 0 */ |
1371 | nf_conntrack_expect_put(exp); | 1371 | nf_conntrack_expect_put(exp); |
1372 | } else if (cda[CTA_EXPECT_HELP_NAME-1]) { | 1372 | } else if (cda[CTA_EXPECT_HELP_NAME-1]) { |
@@ -1449,7 +1449,7 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3) | |||
1449 | err = -ENOMEM; | 1449 | err = -ENOMEM; |
1450 | goto out; | 1450 | goto out; |
1451 | } | 1451 | } |
1452 | 1452 | ||
1453 | exp->expectfn = NULL; | 1453 | exp->expectfn = NULL; |
1454 | exp->flags = 0; | 1454 | exp->flags = 0; |
1455 | exp->master = ct; | 1455 | exp->master = ct; |
@@ -1460,7 +1460,7 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3) | |||
1460 | err = nf_conntrack_expect_related(exp); | 1460 | err = nf_conntrack_expect_related(exp); |
1461 | nf_conntrack_expect_put(exp); | 1461 | nf_conntrack_expect_put(exp); |
1462 | 1462 | ||
1463 | out: | 1463 | out: |
1464 | nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); | 1464 | nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); |
1465 | return err; | 1465 | return err; |
1466 | } | 1466 | } |
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index c59df3bc2bbd..115bcb5d5a7c 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c | |||
@@ -520,7 +520,7 @@ conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff, | |||
520 | tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); | 520 | tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); |
521 | BUG_ON(!tcph); | 521 | BUG_ON(!tcph); |
522 | nexthdr_off += tcph->doff * 4; | 522 | nexthdr_off += tcph->doff * 4; |
523 | datalen = tcplen - tcph->doff * 4; | 523 | datalen = tcplen - tcph->doff * 4; |
524 | 524 | ||
525 | pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); | 525 | pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); |
526 | if (!pptph) { | 526 | if (!pptph) { |
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 1a61b72712cd..456155f05c75 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
@@ -66,7 +66,7 @@ __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto) | |||
66 | if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) | 66 | if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) |
67 | return &nf_conntrack_l4proto_generic; | 67 | return &nf_conntrack_l4proto_generic; |
68 | 68 | ||
69 | return nf_ct_protos[l3proto][l4proto]; | 69 | return rcu_dereference(nf_ct_protos[l3proto][l4proto]); |
70 | } | 70 | } |
71 | EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); | 71 | EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); |
72 | 72 | ||
@@ -77,11 +77,11 @@ nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto) | |||
77 | { | 77 | { |
78 | struct nf_conntrack_l4proto *p; | 78 | struct nf_conntrack_l4proto *p; |
79 | 79 | ||
80 | preempt_disable(); | 80 | rcu_read_lock(); |
81 | p = __nf_ct_l4proto_find(l3proto, l4proto); | 81 | p = __nf_ct_l4proto_find(l3proto, l4proto); |
82 | if (!try_module_get(p->me)) | 82 | if (!try_module_get(p->me)) |
83 | p = &nf_conntrack_l4proto_generic; | 83 | p = &nf_conntrack_l4proto_generic; |
84 | preempt_enable(); | 84 | rcu_read_unlock(); |
85 | 85 | ||
86 | return p; | 86 | return p; |
87 | } | 87 | } |
@@ -98,11 +98,11 @@ nf_ct_l3proto_find_get(u_int16_t l3proto) | |||
98 | { | 98 | { |
99 | struct nf_conntrack_l3proto *p; | 99 | struct nf_conntrack_l3proto *p; |
100 | 100 | ||
101 | preempt_disable(); | 101 | rcu_read_lock(); |
102 | p = __nf_ct_l3proto_find(l3proto); | 102 | p = __nf_ct_l3proto_find(l3proto); |
103 | if (!try_module_get(p->me)) | 103 | if (!try_module_get(p->me)) |
104 | p = &nf_conntrack_l3proto_generic; | 104 | p = &nf_conntrack_l3proto_generic; |
105 | preempt_enable(); | 105 | rcu_read_unlock(); |
106 | 106 | ||
107 | return p; | 107 | return p; |
108 | } | 108 | } |
@@ -137,10 +137,8 @@ void nf_ct_l3proto_module_put(unsigned short l3proto) | |||
137 | { | 137 | { |
138 | struct nf_conntrack_l3proto *p; | 138 | struct nf_conntrack_l3proto *p; |
139 | 139 | ||
140 | preempt_disable(); | 140 | /* rcu_read_lock not necessary since the caller holds a reference */ |
141 | p = __nf_ct_l3proto_find(l3proto); | 141 | p = __nf_ct_l3proto_find(l3proto); |
142 | preempt_enable(); | ||
143 | |||
144 | module_put(p->me); | 142 | module_put(p->me); |
145 | } | 143 | } |
146 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); | 144 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); |
@@ -202,7 +200,7 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) | |||
202 | ret = -EBUSY; | 200 | ret = -EBUSY; |
203 | goto out_unlock; | 201 | goto out_unlock; |
204 | } | 202 | } |
205 | nf_ct_l3protos[proto->l3proto] = proto; | 203 | rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto); |
206 | write_unlock_bh(&nf_conntrack_lock); | 204 | write_unlock_bh(&nf_conntrack_lock); |
207 | 205 | ||
208 | ret = nf_ct_l3proto_register_sysctl(proto); | 206 | ret = nf_ct_l3proto_register_sysctl(proto); |
@@ -217,35 +215,21 @@ out: | |||
217 | } | 215 | } |
218 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register); | 216 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register); |
219 | 217 | ||
220 | int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) | 218 | void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) |
221 | { | 219 | { |
222 | int ret = 0; | 220 | BUG_ON(proto->l3proto >= AF_MAX); |
223 | |||
224 | if (proto->l3proto >= AF_MAX) { | ||
225 | ret = -EBUSY; | ||
226 | goto out; | ||
227 | } | ||
228 | 221 | ||
229 | write_lock_bh(&nf_conntrack_lock); | 222 | write_lock_bh(&nf_conntrack_lock); |
230 | if (nf_ct_l3protos[proto->l3proto] != proto) { | 223 | BUG_ON(nf_ct_l3protos[proto->l3proto] != proto); |
231 | write_unlock_bh(&nf_conntrack_lock); | 224 | rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], |
232 | ret = -EBUSY; | 225 | &nf_conntrack_l3proto_generic); |
233 | goto out; | ||
234 | } | ||
235 | |||
236 | nf_ct_l3protos[proto->l3proto] = &nf_conntrack_l3proto_generic; | ||
237 | write_unlock_bh(&nf_conntrack_lock); | 226 | write_unlock_bh(&nf_conntrack_lock); |
227 | synchronize_rcu(); | ||
238 | 228 | ||
239 | nf_ct_l3proto_unregister_sysctl(proto); | 229 | nf_ct_l3proto_unregister_sysctl(proto); |
240 | 230 | ||
241 | /* Somebody could be still looking at the proto in bh. */ | ||
242 | synchronize_net(); | ||
243 | |||
244 | /* Remove all conntrack entries for this protocol */ | 231 | /* Remove all conntrack entries for this protocol */ |
245 | nf_ct_iterate_cleanup(kill_l3proto, proto); | 232 | nf_ct_iterate_cleanup(kill_l3proto, proto); |
246 | |||
247 | out: | ||
248 | return ret; | ||
249 | } | 233 | } |
250 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); | 234 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); |
251 | 235 | ||
@@ -356,7 +340,7 @@ retry: | |||
356 | goto retry; | 340 | goto retry; |
357 | } | 341 | } |
358 | 342 | ||
359 | nf_ct_protos[l4proto->l3proto][l4proto->l4proto] = l4proto; | 343 | rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], l4proto); |
360 | write_unlock_bh(&nf_conntrack_lock); | 344 | write_unlock_bh(&nf_conntrack_lock); |
361 | 345 | ||
362 | ret = nf_ct_l4proto_register_sysctl(l4proto); | 346 | ret = nf_ct_l4proto_register_sysctl(l4proto); |
@@ -371,40 +355,25 @@ out: | |||
371 | } | 355 | } |
372 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register); | 356 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register); |
373 | 357 | ||
374 | int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) | 358 | void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) |
375 | { | 359 | { |
376 | int ret = 0; | 360 | BUG_ON(l4proto->l3proto >= PF_MAX); |
377 | |||
378 | if (l4proto->l3proto >= PF_MAX) { | ||
379 | ret = -EBUSY; | ||
380 | goto out; | ||
381 | } | ||
382 | 361 | ||
383 | if (l4proto == &nf_conntrack_l4proto_generic) { | 362 | if (l4proto == &nf_conntrack_l4proto_generic) { |
384 | nf_ct_l4proto_unregister_sysctl(l4proto); | 363 | nf_ct_l4proto_unregister_sysctl(l4proto); |
385 | goto out; | 364 | return; |
386 | } | 365 | } |
387 | 366 | ||
388 | write_lock_bh(&nf_conntrack_lock); | 367 | write_lock_bh(&nf_conntrack_lock); |
389 | if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] | 368 | BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto); |
390 | != l4proto) { | 369 | rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], |
391 | write_unlock_bh(&nf_conntrack_lock); | 370 | &nf_conntrack_l4proto_generic); |
392 | ret = -EBUSY; | ||
393 | goto out; | ||
394 | } | ||
395 | nf_ct_protos[l4proto->l3proto][l4proto->l4proto] | ||
396 | = &nf_conntrack_l4proto_generic; | ||
397 | write_unlock_bh(&nf_conntrack_lock); | 371 | write_unlock_bh(&nf_conntrack_lock); |
372 | synchronize_rcu(); | ||
398 | 373 | ||
399 | nf_ct_l4proto_unregister_sysctl(l4proto); | 374 | nf_ct_l4proto_unregister_sysctl(l4proto); |
400 | 375 | ||
401 | /* Somebody could be still looking at the proto in bh. */ | ||
402 | synchronize_net(); | ||
403 | |||
404 | /* Remove all conntrack entries for this protocol */ | 376 | /* Remove all conntrack entries for this protocol */ |
405 | nf_ct_iterate_cleanup(kill_l4proto, l4proto); | 377 | nf_ct_iterate_cleanup(kill_l4proto, l4proto); |
406 | |||
407 | out: | ||
408 | return ret; | ||
409 | } | 378 | } |
410 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); | 379 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); |
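Editor's note: the conversion above replaces preempt_disable()/synchronize_net() with the RCU primitives: lookups read the handler tables under rcu_read_lock() via rcu_dereference(), registration publishes with rcu_assign_pointer(), and unregistration points the slot back at the generic handler, waits in synchronize_rcu(), and only then runs the cleanup (nf_ct_l3proto_module_put can skip the read lock entirely because its caller already holds a module reference). The userspace sketch below models only the publish/replace part with C11 atomics — a release store standing in for rcu_assign_pointer() and an acquire load for rcu_dereference(); it deliberately has no equivalent of the grace-period wait, which is the piece the kernel actually relies on before tearing the old handler down.

/* Userspace analogy of the publish/replace pattern.  Release/acquire
 * atomics stand in for rcu_assign_pointer()/rcu_dereference(); the
 * synchronize_rcu() grace period is intentionally omitted. */
#include <stdatomic.h>
#include <stdio.h>

struct l4handler {
	const char *name;
	int (*packet)(void);
};

static int generic_pkt(void) { return 0; }
static int tcp_pkt(void)     { return 1; }

static struct l4handler generic = { "generic", generic_pkt };
static struct l4handler tcp     = { "tcp",     tcp_pkt };

/* One slot of an nf_ct_protos[][]-style table. */
static _Atomic(struct l4handler *) slot = &generic;

static void handler_register(struct l4handler *p)
{
	/* publish: readers that see the pointer also see *p initialised */
	atomic_store_explicit(&slot, p, memory_order_release);
}

static void handler_unregister(struct l4handler *p)
{
	/* point lookups back at the generic handler ... */
	atomic_store_explicit(&slot, &generic, memory_order_release);
	/* ... the kernel would synchronize_rcu() here before cleaning up
	 * anything that still references p (sysctls, conntrack entries). */
	(void)p;
}

static struct l4handler *handler_find(void)
{
	/* reader side: rcu_read_lock(); rcu_dereference(slot); ... */
	return atomic_load_explicit(&slot, memory_order_acquire);
}

int main(void)
{
	handler_register(&tcp);
	printf("lookup -> %s\n", handler_find()->name);
	handler_unregister(&tcp);
	printf("lookup -> %s\n", handler_find()->name);
	return 0;
}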
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 76e263668222..0133afa2c7ef 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * Connection tracking protocol helper module for SCTP. | 2 | * Connection tracking protocol helper module for SCTP. |
3 | * | 3 | * |
4 | * SCTP is defined in RFC 2960. References to various sections in this code | 4 | * SCTP is defined in RFC 2960. References to various sections in this code |
5 | * are to this RFC. | 5 | * are to this RFC. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
@@ -45,7 +45,7 @@ | |||
45 | static DEFINE_RWLOCK(sctp_lock); | 45 | static DEFINE_RWLOCK(sctp_lock); |
46 | 46 | ||
47 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more | 47 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more |
48 | closely. They're more complex. --RR | 48 | closely. They're more complex. --RR |
49 | 49 | ||
50 | And so for me for SCTP :D -Kiran */ | 50 | And so for me for SCTP :D -Kiran */ |
51 | 51 | ||
@@ -94,32 +94,32 @@ static unsigned int * sctp_timeouts[] | |||
94 | #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT | 94 | #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT |
95 | #define sIV SCTP_CONNTRACK_MAX | 95 | #define sIV SCTP_CONNTRACK_MAX |
96 | 96 | ||
97 | /* | 97 | /* |
98 | These are the descriptions of the states: | 98 | These are the descriptions of the states: |
99 | 99 | ||
100 | NOTE: These state names are tantalizingly similar to the states of an | 100 | NOTE: These state names are tantalizingly similar to the states of an |
101 | SCTP endpoint. But the interpretation of the states is a little different, | 101 | SCTP endpoint. But the interpretation of the states is a little different, |
102 | considering that these are the states of the connection and not of an end | 102 | considering that these are the states of the connection and not of an end |
103 | point. Please note the subtleties. -Kiran | 103 | point. Please note the subtleties. -Kiran |
104 | 104 | ||
105 | NONE - Nothing so far. | 105 | NONE - Nothing so far. |
106 | COOKIE WAIT - We have seen an INIT chunk in the original direction, or also | 106 | COOKIE WAIT - We have seen an INIT chunk in the original direction, or also |
107 | an INIT_ACK chunk in the reply direction. | 107 | an INIT_ACK chunk in the reply direction. |
108 | COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. | 108 | COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. |
109 | ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. | 109 | ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. |
110 | SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. | 110 | SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. |
111 | SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply direction. | 111 | SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply direction. |
112 | SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite | 112 | SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite |
113 | to that of the SHUTDOWN chunk. | 113 | to that of the SHUTDOWN chunk. |
114 | CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of | 114 | CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of |
115 | the SHUTDOWN chunk. Connection is closed. | 115 | the SHUTDOWN chunk. Connection is closed. |
116 | */ | 116 | */ |
117 | 117 | ||
118 | /* TODO | 118 | /* TODO |
119 | - I have assumed that the first INIT is in the original direction. | 119 | - I have assumed that the first INIT is in the original direction. |
120 | This messes things when an INIT comes in the reply direction in CLOSED | 120 | This messes things when an INIT comes in the reply direction in CLOSED |
121 | state. | 121 | state. |
122 | - Check the error type in the reply dir before transitioning from | 122 | - Check the error type in the reply dir before transitioning from |
123 | cookie echoed to closed. | 123 | cookie echoed to closed. |
124 | - Sec 5.2.4 of RFC 2960 | 124 | - Sec 5.2.4 of RFC 2960 |
125 | - Multi Homing support. | 125 | - Multi Homing support. |
@@ -237,7 +237,7 @@ static int do_basic_checks(struct nf_conn *conntrack, | |||
237 | for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { | 237 | for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { |
238 | DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type); | 238 | DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type); |
239 | 239 | ||
240 | if (sch->type == SCTP_CID_INIT | 240 | if (sch->type == SCTP_CID_INIT |
241 | || sch->type == SCTP_CID_INIT_ACK | 241 | || sch->type == SCTP_CID_INIT_ACK |
242 | || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { | 242 | || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { |
243 | flag = 1; | 243 | flag = 1; |
@@ -277,42 +277,42 @@ static int new_state(enum ip_conntrack_dir dir, | |||
277 | DEBUGP("Chunk type: %d\n", chunk_type); | 277 | DEBUGP("Chunk type: %d\n", chunk_type); |
278 | 278 | ||
279 | switch (chunk_type) { | 279 | switch (chunk_type) { |
280 | case SCTP_CID_INIT: | 280 | case SCTP_CID_INIT: |
281 | DEBUGP("SCTP_CID_INIT\n"); | 281 | DEBUGP("SCTP_CID_INIT\n"); |
282 | i = 0; break; | 282 | i = 0; break; |
283 | case SCTP_CID_INIT_ACK: | 283 | case SCTP_CID_INIT_ACK: |
284 | DEBUGP("SCTP_CID_INIT_ACK\n"); | 284 | DEBUGP("SCTP_CID_INIT_ACK\n"); |
285 | i = 1; break; | 285 | i = 1; break; |
286 | case SCTP_CID_ABORT: | 286 | case SCTP_CID_ABORT: |
287 | DEBUGP("SCTP_CID_ABORT\n"); | 287 | DEBUGP("SCTP_CID_ABORT\n"); |
288 | i = 2; break; | 288 | i = 2; break; |
289 | case SCTP_CID_SHUTDOWN: | 289 | case SCTP_CID_SHUTDOWN: |
290 | DEBUGP("SCTP_CID_SHUTDOWN\n"); | 290 | DEBUGP("SCTP_CID_SHUTDOWN\n"); |
291 | i = 3; break; | 291 | i = 3; break; |
292 | case SCTP_CID_SHUTDOWN_ACK: | 292 | case SCTP_CID_SHUTDOWN_ACK: |
293 | DEBUGP("SCTP_CID_SHUTDOWN_ACK\n"); | 293 | DEBUGP("SCTP_CID_SHUTDOWN_ACK\n"); |
294 | i = 4; break; | 294 | i = 4; break; |
295 | case SCTP_CID_ERROR: | 295 | case SCTP_CID_ERROR: |
296 | DEBUGP("SCTP_CID_ERROR\n"); | 296 | DEBUGP("SCTP_CID_ERROR\n"); |
297 | i = 5; break; | 297 | i = 5; break; |
298 | case SCTP_CID_COOKIE_ECHO: | 298 | case SCTP_CID_COOKIE_ECHO: |
299 | DEBUGP("SCTP_CID_COOKIE_ECHO\n"); | 299 | DEBUGP("SCTP_CID_COOKIE_ECHO\n"); |
300 | i = 6; break; | 300 | i = 6; break; |
301 | case SCTP_CID_COOKIE_ACK: | 301 | case SCTP_CID_COOKIE_ACK: |
302 | DEBUGP("SCTP_CID_COOKIE_ACK\n"); | 302 | DEBUGP("SCTP_CID_COOKIE_ACK\n"); |
303 | i = 7; break; | 303 | i = 7; break; |
304 | case SCTP_CID_SHUTDOWN_COMPLETE: | 304 | case SCTP_CID_SHUTDOWN_COMPLETE: |
305 | DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n"); | 305 | DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n"); |
306 | i = 8; break; | 306 | i = 8; break; |
307 | default: | 307 | default: |
308 | /* Other chunks like DATA, SACK, HEARTBEAT and | 308 | /* Other chunks like DATA, SACK, HEARTBEAT and |
309 | its ACK do not cause a change in state */ | 309 | its ACK do not cause a change in state */ |
310 | DEBUGP("Unknown chunk type, Will stay in %s\n", | 310 | DEBUGP("Unknown chunk type, Will stay in %s\n", |
311 | sctp_conntrack_names[cur_state]); | 311 | sctp_conntrack_names[cur_state]); |
312 | return cur_state; | 312 | return cur_state; |
313 | } | 313 | } |
314 | 314 | ||
315 | DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", | 315 | DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", |
316 | dir, sctp_conntrack_names[cur_state], chunk_type, | 316 | dir, sctp_conntrack_names[cur_state], chunk_type, |
317 | sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); | 317 | sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); |
318 | 318 | ||
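Editor's note: new_state() above is a two-step lookup — the chunk type picks a column index, then sctp_conntracks[dir][i][cur_state] yields the next state. The toy program below shows just that shape; the two-state table in it is invented for illustration, while the real table and timeouts are defined earlier in this file.

/* Toy model of the new_state() lookup: chunk type -> column index ->
 * table cell.  Table contents here are made up; see the real
 * sctp_conntracks[][][] table in this file. */
#include <stdio.h>

enum state { S_NONE, S_COOKIE_WAIT, S_MAX };
enum dir   { DIR_ORIG, DIR_REPLY };
enum chunk { C_INIT, C_ABORT, C_OTHER };

static int chunk_index(enum chunk type)
{
	switch (type) {
	case C_INIT:  return 0;
	case C_ABORT: return 1;
	default:      return -1;	/* DATA/SACK/HEARTBEAT: no change */
	}
}

/* [direction][chunk index][current state] -> next state */
static const enum state table[2][2][S_MAX] = {
	[DIR_ORIG] = {
		[0] = { S_COOKIE_WAIT, S_COOKIE_WAIT },	/* INIT  */
		[1] = { S_NONE,        S_NONE        },	/* ABORT */
	},
	[DIR_REPLY] = {
		[0] = { S_NONE,        S_COOKIE_WAIT },
		[1] = { S_NONE,        S_NONE        },
	},
};

static enum state next_state(enum dir d, enum state cur, enum chunk type)
{
	int i = chunk_index(type);

	return i < 0 ? cur : table[d][i][cur];
}

int main(void)
{
	enum state s = next_state(DIR_ORIG, S_NONE, C_INIT);

	printf("NONE + INIT (original dir) -> %d\n", s);
	return 0;
}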
@@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *conntrack, | |||
377 | /* Sec 8.5.1 (C) */ | 377 | /* Sec 8.5.1 (C) */ |
378 | if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) | 378 | if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) |
379 | && !(sh->vtag == conntrack->proto.sctp.vtag | 379 | && !(sh->vtag == conntrack->proto.sctp.vtag |
380 | [1 - CTINFO2DIR(ctinfo)] | 380 | [1 - CTINFO2DIR(ctinfo)] |
381 | && (sch->flags & 1))) { | 381 | && (sch->flags & 1))) { |
382 | write_unlock_bh(&sctp_lock); | 382 | write_unlock_bh(&sctp_lock); |
383 | return -1; | 383 | return -1; |
@@ -402,17 +402,17 @@ static int sctp_packet(struct nf_conn *conntrack, | |||
402 | } | 402 | } |
403 | 403 | ||
404 | /* If it is an INIT or an INIT ACK note down the vtag */ | 404 | /* If it is an INIT or an INIT ACK note down the vtag */ |
405 | if (sch->type == SCTP_CID_INIT | 405 | if (sch->type == SCTP_CID_INIT |
406 | || sch->type == SCTP_CID_INIT_ACK) { | 406 | || sch->type == SCTP_CID_INIT_ACK) { |
407 | sctp_inithdr_t _inithdr, *ih; | 407 | sctp_inithdr_t _inithdr, *ih; |
408 | 408 | ||
409 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), | 409 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), |
410 | sizeof(_inithdr), &_inithdr); | 410 | sizeof(_inithdr), &_inithdr); |
411 | if (ih == NULL) { | 411 | if (ih == NULL) { |
412 | write_unlock_bh(&sctp_lock); | 412 | write_unlock_bh(&sctp_lock); |
413 | return -1; | 413 | return -1; |
414 | } | 414 | } |
415 | DEBUGP("Setting vtag %x for dir %d\n", | 415 | DEBUGP("Setting vtag %x for dir %d\n", |
416 | ih->init_tag, !CTINFO2DIR(ctinfo)); | 416 | ih->init_tag, !CTINFO2DIR(ctinfo)); |
417 | conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag; | 417 | conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag; |
418 | } | 418 | } |
@@ -466,7 +466,7 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb, | |||
466 | newconntrack = SCTP_CONNTRACK_MAX; | 466 | newconntrack = SCTP_CONNTRACK_MAX; |
467 | for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { | 467 | for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) { |
468 | /* Don't need lock here: this conntrack not in circulation yet */ | 468 | /* Don't need lock here: this conntrack not in circulation yet */ |
469 | newconntrack = new_state(IP_CT_DIR_ORIGINAL, | 469 | newconntrack = new_state(IP_CT_DIR_ORIGINAL, |
470 | SCTP_CONNTRACK_NONE, sch->type); | 470 | SCTP_CONNTRACK_NONE, sch->type); |
471 | 471 | ||
472 | /* Invalid: delete conntrack */ | 472 | /* Invalid: delete conntrack */ |
@@ -481,14 +481,14 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb, | |||
481 | sctp_inithdr_t _inithdr, *ih; | 481 | sctp_inithdr_t _inithdr, *ih; |
482 | 482 | ||
483 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), | 483 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), |
484 | sizeof(_inithdr), &_inithdr); | 484 | sizeof(_inithdr), &_inithdr); |
485 | if (ih == NULL) | 485 | if (ih == NULL) |
486 | return 0; | 486 | return 0; |
487 | 487 | ||
488 | DEBUGP("Setting vtag %x for new conn\n", | 488 | DEBUGP("Setting vtag %x for new conn\n", |
489 | ih->init_tag); | 489 | ih->init_tag); |
490 | 490 | ||
491 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = | 491 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = |
492 | ih->init_tag; | 492 | ih->init_tag; |
493 | } else { | 493 | } else { |
494 | /* Sec 8.5.1 (A) */ | 494 | /* Sec 8.5.1 (A) */ |
@@ -498,7 +498,7 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb, | |||
498 | /* If it is a shutdown ack OOTB packet, we expect a return | 498 | /* If it is a shutdown ack OOTB packet, we expect a return |
499 | shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ | 499 | shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ |
500 | else { | 500 | else { |
501 | DEBUGP("Setting vtag %x for new conn OOTB\n", | 501 | DEBUGP("Setting vtag %x for new conn OOTB\n", |
502 | sh->vtag); | 502 | sh->vtag); |
503 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; | 503 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; |
504 | } | 504 | } |
@@ -698,7 +698,7 @@ int __init nf_conntrack_proto_sctp_init(void) | |||
698 | cleanup_sctp4: | 698 | cleanup_sctp4: |
699 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); | 699 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
700 | out: | 700 | out: |
701 | DEBUGP("SCTP conntrack module loading %s\n", | 701 | DEBUGP("SCTP conntrack module loading %s\n", |
702 | ret ? "failed": "succeeded"); | 702 | ret ? "failed": "succeeded"); |
703 | return ret; | 703 | return ret; |
704 | } | 704 | } |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 6fccdcf43e08..aff65aad3c66 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -55,19 +55,19 @@ | |||
55 | /* Protects conntrack->proto.tcp */ | 55 | /* Protects conntrack->proto.tcp */ |
56 | static DEFINE_RWLOCK(tcp_lock); | 56 | static DEFINE_RWLOCK(tcp_lock); |
57 | 57 | ||
58 | /* "Be conservative in what you do, | 58 | /* "Be conservative in what you do, |
59 | be liberal in what you accept from others." | 59 | be liberal in what you accept from others." |
60 | If it's non-zero, we mark only out of window RST segments as INVALID. */ | 60 | If it's non-zero, we mark only out of window RST segments as INVALID. */ |
61 | int nf_ct_tcp_be_liberal __read_mostly = 0; | 61 | static int nf_ct_tcp_be_liberal __read_mostly = 0; |
62 | 62 | ||
63 | /* If it is set to zero, we disable picking up already established | 63 | /* If it is set to zero, we disable picking up already established |
64 | connections. */ | 64 | connections. */ |
65 | int nf_ct_tcp_loose __read_mostly = 1; | 65 | static int nf_ct_tcp_loose __read_mostly = 1; |
66 | 66 | ||
67 | /* Max number of the retransmitted packets without receiving an (acceptable) | 67 | /* Max number of the retransmitted packets without receiving an (acceptable) |
68 | ACK from the destination. If this number is reached, a shorter timer | 68 | ACK from the destination. If this number is reached, a shorter timer |
69 | will be started. */ | 69 | will be started. */ |
70 | int nf_ct_tcp_max_retrans __read_mostly = 3; | 70 | static int nf_ct_tcp_max_retrans __read_mostly = 3; |
71 | 71 | ||
72 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more | 72 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more |
73 | closely. They're more complex. --RR */ | 73 | closely. They're more complex. --RR */ |
@@ -84,7 +84,7 @@ static const char *tcp_conntrack_names[] = { | |||
84 | "CLOSE", | 84 | "CLOSE", |
85 | "LISTEN" | 85 | "LISTEN" |
86 | }; | 86 | }; |
87 | 87 | ||
88 | #define SECS * HZ | 88 | #define SECS * HZ |
89 | #define MINS * 60 SECS | 89 | #define MINS * 60 SECS |
90 | #define HOURS * 60 MINS | 90 | #define HOURS * 60 MINS |
@@ -100,10 +100,10 @@ static unsigned int nf_ct_tcp_timeout_time_wait __read_mostly = 2 MINS; | |||
100 | static unsigned int nf_ct_tcp_timeout_close __read_mostly = 10 SECS; | 100 | static unsigned int nf_ct_tcp_timeout_close __read_mostly = 10 SECS; |
101 | 101 | ||
102 | /* RFC1122 says the R2 limit should be at least 100 seconds. | 102 | /* RFC1122 says the R2 limit should be at least 100 seconds. |
103 | Linux uses 15 packets as limit, which corresponds | 103 | Linux uses 15 packets as limit, which corresponds |
104 | to ~13-30min depending on RTO. */ | 104 | to ~13-30min depending on RTO. */ |
105 | static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; | 105 | static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; |
106 | 106 | ||
107 | static unsigned int * tcp_timeouts[] = { | 107 | static unsigned int * tcp_timeouts[] = { |
108 | NULL, /* TCP_CONNTRACK_NONE */ | 108 | NULL, /* TCP_CONNTRACK_NONE */ |
109 | &nf_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ | 109 | &nf_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ |
@@ -116,7 +116,7 @@ static unsigned int * tcp_timeouts[] = { | |||
116 | &nf_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */ | 116 | &nf_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */ |
117 | NULL, /* TCP_CONNTRACK_LISTEN */ | 117 | NULL, /* TCP_CONNTRACK_LISTEN */ |
118 | }; | 118 | }; |
119 | 119 | ||
120 | #define sNO TCP_CONNTRACK_NONE | 120 | #define sNO TCP_CONNTRACK_NONE |
121 | #define sSS TCP_CONNTRACK_SYN_SENT | 121 | #define sSS TCP_CONNTRACK_SYN_SENT |
122 | #define sSR TCP_CONNTRACK_SYN_RECV | 122 | #define sSR TCP_CONNTRACK_SYN_RECV |
@@ -139,13 +139,13 @@ enum tcp_bit_set { | |||
139 | TCP_RST_SET, | 139 | TCP_RST_SET, |
140 | TCP_NONE_SET, | 140 | TCP_NONE_SET, |
141 | }; | 141 | }; |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * The TCP state transition table needs a few words... | 144 | * The TCP state transition table needs a few words... |
145 | * | 145 | * |
146 | * We are the man in the middle. All the packets go through us | 146 | * We are the man in the middle. All the packets go through us |
147 | * but might get lost in transit to the destination. | 147 | * but might get lost in transit to the destination. |
148 | * It is assumed that the destinations can't receive segments | 148 | * It is assumed that the destinations can't receive segments |
149 | * we haven't seen. | 149 | * we haven't seen. |
150 | * | 150 | * |
151 | * The checked segment is in window, but our windows are *not* | 151 | * The checked segment is in window, but our windows are *not* |
@@ -155,11 +155,11 @@ enum tcp_bit_set { | |||
155 | * The meaning of the states are: | 155 | * The meaning of the states are: |
156 | * | 156 | * |
157 | * NONE: initial state | 157 | * NONE: initial state |
158 | * SYN_SENT: SYN-only packet seen | 158 | * SYN_SENT: SYN-only packet seen |
159 | * SYN_RECV: SYN-ACK packet seen | 159 | * SYN_RECV: SYN-ACK packet seen |
160 | * ESTABLISHED: ACK packet seen | 160 | * ESTABLISHED: ACK packet seen |
161 | * FIN_WAIT: FIN packet seen | 161 | * FIN_WAIT: FIN packet seen |
162 | * CLOSE_WAIT: ACK seen (after FIN) | 162 | * CLOSE_WAIT: ACK seen (after FIN) |
163 | * LAST_ACK: FIN seen (after FIN) | 163 | * LAST_ACK: FIN seen (after FIN) |
164 | * TIME_WAIT: last ACK seen | 164 | * TIME_WAIT: last ACK seen |
165 | * CLOSE: closed connection | 165 | * CLOSE: closed connection |
@@ -167,8 +167,8 @@ enum tcp_bit_set { | |||
167 | * LISTEN state is not used. | 167 | * LISTEN state is not used. |
168 | * | 168 | * |
169 | * Packets marked as IGNORED (sIG): | 169 | * Packets marked as IGNORED (sIG): |
170 | * if they may be either invalid or valid | 170 | * if they may be either invalid or valid |
171 | * and the receiver may send back a connection | 171 | * and the receiver may send back a connection |
172 | * closing RST or a SYN/ACK. | 172 | * closing RST or a SYN/ACK. |
173 | * | 173 | * |
174 | * Packets marked as INVALID (sIV): | 174 | * Packets marked as INVALID (sIV): |
@@ -185,7 +185,7 @@ static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
185 | * sSS -> sSS Retransmitted SYN | 185 | * sSS -> sSS Retransmitted SYN |
186 | * sSR -> sIG Late retransmitted SYN? | 186 | * sSR -> sIG Late retransmitted SYN? |
187 | * sES -> sIG Error: SYNs in window outside the SYN_SENT state | 187 | * sES -> sIG Error: SYNs in window outside the SYN_SENT state |
188 | * are errors. Receiver will reply with RST | 188 | * are errors. Receiver will reply with RST |
189 | * and close the connection. | 189 | * and close the connection. |
190 | * Or we are not in sync and hold a dead connection. | 190 | * Or we are not in sync and hold a dead connection. |
191 | * sFW -> sIG | 191 | * sFW -> sIG |
@@ -198,10 +198,10 @@ static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
198 | /*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, | 198 | /*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, |
199 | /* | 199 | /* |
200 | * A SYN/ACK from the client is always invalid: | 200 | * A SYN/ACK from the client is always invalid: |
201 | * - either it tries to set up a simultaneous open, which is | 201 | * - either it tries to set up a simultaneous open, which is |
202 | * not supported; | 202 | * not supported; |
203 | * - or the firewall has just been inserted between the two hosts | 203 | * - or the firewall has just been inserted between the two hosts |
204 | * during the session set-up. The SYN will be retransmitted | 204 | * during the session set-up. The SYN will be retransmitted |
205 | * by the true client (or it'll time out). | 205 | * by the true client (or it'll time out). |
206 | */ | 206 | */ |
207 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ | 207 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ |
@@ -213,7 +213,7 @@ static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
213 | * sSR -> sFW Close started. | 213 | * sSR -> sFW Close started. |
214 | * sES -> sFW | 214 | * sES -> sFW |
215 | * sFW -> sLA FIN seen in both directions, waiting for | 215 | * sFW -> sLA FIN seen in both directions, waiting for |
216 | * the last ACK. | 216 | * the last ACK. |
217 | * Might be a retransmitted FIN as well... | 217 | * Might be a retransmitted FIN as well... |
218 | * sCW -> sLA | 218 | * sCW -> sLA |
219 | * sLA -> sLA Retransmitted FIN. Remain in the same state. | 219 | * sLA -> sLA Retransmitted FIN. Remain in the same state. |
@@ -291,7 +291,7 @@ static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
291 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ | 291 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ |
292 | /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, | 292 | /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, |
293 | /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } | 293 | /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } |
294 | } | 294 | } |
295 | }; | 295 | }; |
296 | 296 | ||
297 | static int tcp_pkt_to_tuple(const struct sk_buff *skb, | 297 | static int tcp_pkt_to_tuple(const struct sk_buff *skb, |
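Editor's note: the table above is consulted as tcp_conntracks[direction][segment kind][current state], where the segment kind comes from get_conntrack_index() a little further down. The sketch below reproduces the usual classification order (RST first, then SYN/SYN-ACK, FIN, ACK, none) as an assumption about that helper; the enum mirrors tcp_bit_set from earlier in this file.

/* Standalone sketch of how a segment is classified before the
 * tcp_conntracks[][][] lookup.  The precedence is an assumption about
 * get_conntrack_index(); check the real function in this file. */
#include <stdio.h>

enum tcp_bit_set {
	TCP_SYN_SET,
	TCP_SYNACK_SET,
	TCP_FIN_SET,
	TCP_ACK_SET,
	TCP_RST_SET,
	TCP_NONE_SET,
};

struct flags { int syn, ack, fin, rst; };

static enum tcp_bit_set classify(struct flags f)
{
	if (f.rst)
		return TCP_RST_SET;
	if (f.syn)
		return f.ack ? TCP_SYNACK_SET : TCP_SYN_SET;
	if (f.fin)
		return TCP_FIN_SET;
	if (f.ack)
		return TCP_ACK_SET;
	return TCP_NONE_SET;
}

int main(void)
{
	struct flags synack = { .syn = 1, .ack = 1 };

	/* The state machine step is then:
	 *   new = tcp_conntracks[dir][classify(flags)][old];	*/
	printf("SYN|ACK -> index %d (TCP_SYNACK_SET)\n", classify(synack));
	return 0;
}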
@@ -352,21 +352,21 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph) | |||
352 | 352 | ||
353 | /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering | 353 | /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering |
354 | in IP Filter' by Guido van Rooij. | 354 | in IP Filter' by Guido van Rooij. |
355 | 355 | ||
356 | http://www.nluug.nl/events/sane2000/papers.html | 356 | http://www.nluug.nl/events/sane2000/papers.html |
357 | http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz | 357 | http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz |
358 | 358 | ||
359 | The boundaries and the conditions are changed according to RFC793: | 359 | The boundaries and the conditions are changed according to RFC793: |
360 | the packet must intersect the window (i.e. segments may be | 360 | the packet must intersect the window (i.e. segments may be |
361 | after the right or before the left edge) and thus receivers may ACK | 361 | after the right or before the left edge) and thus receivers may ACK |
362 | segments after the right edge of the window. | 362 | segments after the right edge of the window. |
363 | 363 | ||
364 | td_maxend = max(sack + max(win,1)) seen in reply packets | 364 | td_maxend = max(sack + max(win,1)) seen in reply packets |
365 | td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets | 365 | td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets |
366 | td_maxwin += seq + len - sender.td_maxend | 366 | td_maxwin += seq + len - sender.td_maxend |
367 | if seq + len > sender.td_maxend | 367 | if seq + len > sender.td_maxend |
368 | td_end = max(seq + len) seen in sent packets | 368 | td_end = max(seq + len) seen in sent packets |
369 | 369 | ||
370 | I. Upper bound for valid data: seq <= sender.td_maxend | 370 | I. Upper bound for valid data: seq <= sender.td_maxend |
371 | II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin | 371 | II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin |
372 | III. Upper bound for valid ack: sack <= receiver.td_end | 372 | III. Upper bound for valid ack: sack <= receiver.td_end |
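Editor's note: together with the lower bound on the ACK (receiver.td_end - MAXACKWINDOW, checked with after() later in tcp_in_window()), these conditions decide whether a segment is taken as in-window. A self-contained walk-through with made-up sequence numbers, using the same before()/after() mod-2^32 comparisons and the MAXACKWINDOW macro from this file:

/* Self-contained walk-through of the four in-window conditions used by
 * tcp_in_window().  All numbers are invented. */
#include <stdint.h>
#include <stdio.h>

#define before(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
#define after(a, b)  before(b, a)

struct side { uint32_t td_end, td_maxend, td_maxwin; };

#define MAXACKWINCONST 66000u
#define MAXACKWINDOW(s) ((s)->td_maxwin > MAXACKWINCONST ? (s)->td_maxwin \
						: MAXACKWINCONST)

int main(void)
{
	/* sender = direction of this packet, receiver = other direction */
	struct side sender   = { .td_end = 1000, .td_maxend = 5000,  .td_maxwin = 4000 };
	struct side receiver = { .td_end = 9000, .td_maxend = 12000, .td_maxwin = 3000 };

	uint32_t seq = 1200, len = 100;		/* data carried by this segment */
	uint32_t end = seq + len;		/* segment_seq_plus_len(), no SYN/FIN */
	uint32_t ack = 8800, sack = ack;	/* no SACK blocks in this example */

	int ok = before(seq, sender.td_maxend + 1) &&			/* I   */
		 after(end, sender.td_end - receiver.td_maxwin - 1) &&	/* II  */
		 before(sack, receiver.td_end + 1) &&			/* III */
		 after(ack, receiver.td_end - MAXACKWINDOW(&sender));	/* IV  */

	printf("seq=%u end=%u ack=%u -> %s\n", seq, end, ack,
	       ok ? "in window" : "out of window");
	return 0;
}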
@@ -374,8 +374,8 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph) | |||
374 | 374 | ||
375 | where sack is the highest right edge of sack block found in the packet. | 375 | where sack is the highest right edge of sack block found in the packet. |
376 | 376 | ||
377 | The upper bound limit for a valid ack is not ignored - | 377 | The upper bound limit for a valid ack is not ignored - |
378 | we don't have to deal with fragments. | 378 | we don't have to deal with fragments. |
379 | */ | 379 | */ |
380 | 380 | ||
381 | static inline __u32 segment_seq_plus_len(__u32 seq, | 381 | static inline __u32 segment_seq_plus_len(__u32 seq, |
@@ -388,19 +388,19 @@ static inline __u32 segment_seq_plus_len(__u32 seq, | |||
388 | return (seq + len - dataoff - tcph->doff*4 | 388 | return (seq + len - dataoff - tcph->doff*4 |
389 | + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); | 389 | + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); |
390 | } | 390 | } |
391 | 391 | ||
392 | /* Fixme: what about big packets? */ | 392 | /* Fixme: what about big packets? */ |
393 | #define MAXACKWINCONST 66000 | 393 | #define MAXACKWINCONST 66000 |
394 | #define MAXACKWINDOW(sender) \ | 394 | #define MAXACKWINDOW(sender) \ |
395 | ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \ | 395 | ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \ |
396 | : MAXACKWINCONST) | 396 | : MAXACKWINCONST) |
397 | 397 | ||
398 | /* | 398 | /* |
399 | * Simplified tcp_parse_options routine from tcp_input.c | 399 | * Simplified tcp_parse_options routine from tcp_input.c |
400 | */ | 400 | */ |
401 | static void tcp_options(const struct sk_buff *skb, | 401 | static void tcp_options(const struct sk_buff *skb, |
402 | unsigned int dataoff, | 402 | unsigned int dataoff, |
403 | struct tcphdr *tcph, | 403 | struct tcphdr *tcph, |
404 | struct ip_ct_tcp_state *state) | 404 | struct ip_ct_tcp_state *state) |
405 | { | 405 | { |
406 | unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; | 406 | unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; |
@@ -414,7 +414,7 @@ static void tcp_options(const struct sk_buff *skb, | |||
414 | length, buff); | 414 | length, buff); |
415 | BUG_ON(ptr == NULL); | 415 | BUG_ON(ptr == NULL); |
416 | 416 | ||
417 | state->td_scale = | 417 | state->td_scale = |
418 | state->flags = 0; | 418 | state->flags = 0; |
419 | 419 | ||
420 | while (length > 0) { | 420 | while (length > 0) { |
@@ -434,7 +434,7 @@ static void tcp_options(const struct sk_buff *skb, | |||
434 | if (opsize > length) | 434 | if (opsize > length) |
435 | break; /* don't parse partial options */ | 435 | break; /* don't parse partial options */ |
436 | 436 | ||
437 | if (opcode == TCPOPT_SACK_PERM | 437 | if (opcode == TCPOPT_SACK_PERM |
438 | && opsize == TCPOLEN_SACK_PERM) | 438 | && opsize == TCPOLEN_SACK_PERM) |
439 | state->flags |= IP_CT_TCP_FLAG_SACK_PERM; | 439 | state->flags |= IP_CT_TCP_FLAG_SACK_PERM; |
440 | else if (opcode == TCPOPT_WINDOW | 440 | else if (opcode == TCPOPT_WINDOW |
@@ -457,7 +457,7 @@ static void tcp_options(const struct sk_buff *skb, | |||
457 | static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | 457 | static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, |
458 | struct tcphdr *tcph, __u32 *sack) | 458 | struct tcphdr *tcph, __u32 *sack) |
459 | { | 459 | { |
460 | unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; | 460 | unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; |
461 | unsigned char *ptr; | 461 | unsigned char *ptr; |
462 | int length = (tcph->doff*4) - sizeof(struct tcphdr); | 462 | int length = (tcph->doff*4) - sizeof(struct tcphdr); |
463 | __u32 tmp; | 463 | __u32 tmp; |
@@ -472,10 +472,10 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | |||
472 | /* Fast path for timestamp-only option */ | 472 | /* Fast path for timestamp-only option */ |
473 | if (length == TCPOLEN_TSTAMP_ALIGNED*4 | 473 | if (length == TCPOLEN_TSTAMP_ALIGNED*4 |
474 | && *(__be32 *)ptr == | 474 | && *(__be32 *)ptr == |
475 | __constant_htonl((TCPOPT_NOP << 24) | 475 | __constant_htonl((TCPOPT_NOP << 24) |
476 | | (TCPOPT_NOP << 16) | 476 | | (TCPOPT_NOP << 16) |
477 | | (TCPOPT_TIMESTAMP << 8) | 477 | | (TCPOPT_TIMESTAMP << 8) |
478 | | TCPOLEN_TIMESTAMP)) | 478 | | TCPOLEN_TIMESTAMP)) |
479 | return; | 479 | return; |
480 | 480 | ||
481 | while (length > 0) { | 481 | while (length > 0) { |
@@ -495,15 +495,15 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | |||
495 | if (opsize > length) | 495 | if (opsize > length) |
496 | break; /* don't parse partial options */ | 496 | break; /* don't parse partial options */ |
497 | 497 | ||
498 | if (opcode == TCPOPT_SACK | 498 | if (opcode == TCPOPT_SACK |
499 | && opsize >= (TCPOLEN_SACK_BASE | 499 | && opsize >= (TCPOLEN_SACK_BASE |
500 | + TCPOLEN_SACK_PERBLOCK) | 500 | + TCPOLEN_SACK_PERBLOCK) |
501 | && !((opsize - TCPOLEN_SACK_BASE) | 501 | && !((opsize - TCPOLEN_SACK_BASE) |
502 | % TCPOLEN_SACK_PERBLOCK)) { | 502 | % TCPOLEN_SACK_PERBLOCK)) { |
503 | for (i = 0; | 503 | for (i = 0; |
504 | i < (opsize - TCPOLEN_SACK_BASE); | 504 | i < (opsize - TCPOLEN_SACK_BASE); |
505 | i += TCPOLEN_SACK_PERBLOCK) { | 505 | i += TCPOLEN_SACK_PERBLOCK) { |
506 | tmp = ntohl(*((__be32 *)(ptr+i)+1)); | 506 | tmp = ntohl(*((__be32 *)(ptr+i)+1)); |
507 | 507 | ||
508 | if (after(tmp, *sack)) | 508 | if (after(tmp, *sack)) |
509 | *sack = tmp; | 509 | *sack = tmp; |
@@ -516,12 +516,12 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | |||
516 | } | 516 | } |
517 | } | 517 | } |
518 | 518 | ||
519 | static int tcp_in_window(struct ip_ct_tcp *state, | 519 | static int tcp_in_window(struct ip_ct_tcp *state, |
520 | enum ip_conntrack_dir dir, | 520 | enum ip_conntrack_dir dir, |
521 | unsigned int index, | 521 | unsigned int index, |
522 | const struct sk_buff *skb, | 522 | const struct sk_buff *skb, |
523 | unsigned int dataoff, | 523 | unsigned int dataoff, |
524 | struct tcphdr *tcph, | 524 | struct tcphdr *tcph, |
525 | int pf) | 525 | int pf) |
526 | { | 526 | { |
527 | struct ip_ct_tcp_state *sender = &state->seen[dir]; | 527 | struct ip_ct_tcp_state *sender = &state->seen[dir]; |
@@ -543,14 +543,14 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
543 | DEBUGP("tcp_in_window: START\n"); | 543 | DEBUGP("tcp_in_window: START\n"); |
544 | DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " | 544 | DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " |
545 | "seq=%u ack=%u sack=%u win=%u end=%u\n", | 545 | "seq=%u ack=%u sack=%u win=%u end=%u\n", |
546 | NIPQUAD(iph->saddr), ntohs(tcph->source), | 546 | NIPQUAD(iph->saddr), ntohs(tcph->source), |
547 | NIPQUAD(iph->daddr), ntohs(tcph->dest), | 547 | NIPQUAD(iph->daddr), ntohs(tcph->dest), |
548 | seq, ack, sack, win, end); | 548 | seq, ack, sack, win, end); |
549 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " | 549 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " |
550 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 550 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
551 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 551 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
552 | sender->td_scale, | 552 | sender->td_scale, |
553 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 553 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
554 | receiver->td_scale); | 554 | receiver->td_scale); |
555 | 555 | ||
556 | if (sender->td_end == 0) { | 556 | if (sender->td_end == 0) { |
@@ -561,26 +561,26 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
561 | /* | 561 | /* |
562 | * Outgoing SYN-ACK in reply to a SYN. | 562 | * Outgoing SYN-ACK in reply to a SYN. |
563 | */ | 563 | */ |
564 | sender->td_end = | 564 | sender->td_end = |
565 | sender->td_maxend = end; | 565 | sender->td_maxend = end; |
566 | sender->td_maxwin = (win == 0 ? 1 : win); | 566 | sender->td_maxwin = (win == 0 ? 1 : win); |
567 | 567 | ||
568 | tcp_options(skb, dataoff, tcph, sender); | 568 | tcp_options(skb, dataoff, tcph, sender); |
569 | /* | 569 | /* |
570 | * RFC 1323: | 570 | * RFC 1323: |
571 | * Both sides must send the Window Scale option | 571 | * Both sides must send the Window Scale option |
572 | * to enable window scaling in either direction. | 572 | * to enable window scaling in either direction. |
573 | */ | 573 | */ |
574 | if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE | 574 | if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE |
575 | && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) | 575 | && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) |
576 | sender->td_scale = | 576 | sender->td_scale = |
577 | receiver->td_scale = 0; | 577 | receiver->td_scale = 0; |
578 | } else { | 578 | } else { |
579 | /* | 579 | /* |
580 | * We are in the middle of a connection, | 580 | * We are in the middle of a connection, |
581 | * its history is lost for us. | 581 | * its history is lost for us. |
582 | * Let's try to use the data from the packet. | 582 | * Let's try to use the data from the packet. |
583 | */ | 583 | */ |
584 | sender->td_end = end; | 584 | sender->td_end = end; |
585 | sender->td_maxwin = (win == 0 ? 1 : win); | 585 | sender->td_maxwin = (win == 0 ? 1 : win); |
586 | sender->td_maxend = end + sender->td_maxwin; | 586 | sender->td_maxend = end + sender->td_maxwin; |
@@ -592,7 +592,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
592 | && after(end, sender->td_end)) { | 592 | && after(end, sender->td_end)) { |
593 | /* | 593 | /* |
594 | * RFC 793: "if a TCP is reinitialized ... then it need | 594 | * RFC 793: "if a TCP is reinitialized ... then it need |
595 | * not wait at all; it must only be sure to use sequence | 595 | * not wait at all; it must only be sure to use sequence |
596 | * numbers larger than those recently used." | 596 | * numbers larger than those recently used." |
597 | */ | 597 | */ |
598 | sender->td_end = | 598 | sender->td_end = |
@@ -607,8 +607,8 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
607 | * If there is no ACK, just pretend it was set and OK. | 607 | * If there is no ACK, just pretend it was set and OK. |
608 | */ | 608 | */ |
609 | ack = sack = receiver->td_end; | 609 | ack = sack = receiver->td_end; |
610 | } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == | 610 | } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == |
611 | (TCP_FLAG_ACK|TCP_FLAG_RST)) | 611 | (TCP_FLAG_ACK|TCP_FLAG_RST)) |
612 | && (ack == 0)) { | 612 | && (ack == 0)) { |
613 | /* | 613 | /* |
614 | * Broken TCP stacks, that set ACK in RST packets as well | 614 | * Broken TCP stacks, that set ACK in RST packets as well |
@@ -637,21 +637,21 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
637 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " | 637 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " |
638 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 638 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
639 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 639 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
640 | sender->td_scale, | 640 | sender->td_scale, |
641 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 641 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
642 | receiver->td_scale); | 642 | receiver->td_scale); |
643 | 643 | ||
644 | DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", | 644 | DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", |
645 | before(seq, sender->td_maxend + 1), | 645 | before(seq, sender->td_maxend + 1), |
646 | after(end, sender->td_end - receiver->td_maxwin - 1), | 646 | after(end, sender->td_end - receiver->td_maxwin - 1), |
647 | before(sack, receiver->td_end + 1), | 647 | before(sack, receiver->td_end + 1), |
648 | after(ack, receiver->td_end - MAXACKWINDOW(sender))); | 648 | after(ack, receiver->td_end - MAXACKWINDOW(sender))); |
649 | 649 | ||
650 | if (before(seq, sender->td_maxend + 1) && | 650 | if (before(seq, sender->td_maxend + 1) && |
651 | after(end, sender->td_end - receiver->td_maxwin - 1) && | 651 | after(end, sender->td_end - receiver->td_maxwin - 1) && |
652 | before(sack, receiver->td_end + 1) && | 652 | before(sack, receiver->td_end + 1) && |
653 | after(ack, receiver->td_end - MAXACKWINDOW(sender))) { | 653 | after(ack, receiver->td_end - MAXACKWINDOW(sender))) { |
654 | /* | 654 | /* |
655 | * Take into account window scaling (RFC 1323). | 655 | * Take into account window scaling (RFC 1323). |
656 | */ | 656 | */ |
657 | if (!tcph->syn) | 657 | if (!tcph->syn) |
@@ -676,7 +676,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
676 | receiver->td_maxend++; | 676 | receiver->td_maxend++; |
677 | } | 677 | } |
678 | 678 | ||
679 | /* | 679 | /* |
680 | * Check retransmissions. | 680 | * Check retransmissions. |
681 | */ | 681 | */ |
682 | if (index == TCP_ACK_SET) { | 682 | if (index == TCP_ACK_SET) { |
@@ -712,11 +712,11 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
712 | : "ACK is over the upper bound (ACKed data not seen yet)" | 712 | : "ACK is over the upper bound (ACKed data not seen yet)" |
713 | : "SEQ is under the lower bound (already ACKed data retransmitted)" | 713 | : "SEQ is under the lower bound (already ACKed data retransmitted)" |
714 | : "SEQ is over the upper bound (over the window of the receiver)"); | 714 | : "SEQ is over the upper bound (over the window of the receiver)"); |
715 | } | 715 | } |
716 | 716 | ||
717 | DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " | 717 | DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " |
718 | "receiver end=%u maxend=%u maxwin=%u\n", | 718 | "receiver end=%u maxend=%u maxwin=%u\n", |
719 | res, sender->td_end, sender->td_maxend, sender->td_maxwin, | 719 | res, sender->td_end, sender->td_maxend, sender->td_maxwin, |
720 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin); | 720 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin); |
721 | 721 | ||
722 | return res; | 722 | return res; |
@@ -727,7 +727,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
727 | /* Caller must linearize skb at tcp header. */ | 727 | /* Caller must linearize skb at tcp header. */ |
728 | void nf_conntrack_tcp_update(struct sk_buff *skb, | 728 | void nf_conntrack_tcp_update(struct sk_buff *skb, |
729 | unsigned int dataoff, | 729 | unsigned int dataoff, |
730 | struct nf_conn *conntrack, | 730 | struct nf_conn *conntrack, |
731 | int dir) | 731 | int dir) |
732 | { | 732 | { |
733 | struct tcphdr *tcph = (void *)skb->data + dataoff; | 733 | struct tcphdr *tcph = (void *)skb->data + dataoff; |
@@ -750,7 +750,7 @@ void nf_conntrack_tcp_update(struct sk_buff *skb, | |||
750 | DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " | 750 | DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " |
751 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 751 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
752 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 752 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
753 | sender->td_scale, | 753 | sender->td_scale, |
754 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 754 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
755 | receiver->td_scale); | 755 | receiver->td_scale); |
756 | } | 756 | } |
@@ -804,8 +804,8 @@ static int tcp_error(struct sk_buff *skb, | |||
804 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, | 804 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, |
805 | "nf_ct_tcp: short packet "); | 805 | "nf_ct_tcp: short packet "); |
806 | return -NF_ACCEPT; | 806 | return -NF_ACCEPT; |
807 | } | 807 | } |
808 | 808 | ||
809 | /* Not whole TCP header or malformed packet */ | 809 | /* Not whole TCP header or malformed packet */ |
810 | if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { | 810 | if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { |
811 | if (LOG_INVALID(IPPROTO_TCP)) | 811 | if (LOG_INVALID(IPPROTO_TCP)) |
@@ -813,7 +813,7 @@ static int tcp_error(struct sk_buff *skb, | |||
813 | "nf_ct_tcp: truncated/malformed packet "); | 813 | "nf_ct_tcp: truncated/malformed packet "); |
814 | return -NF_ACCEPT; | 814 | return -NF_ACCEPT; |
815 | } | 815 | } |
816 | 816 | ||
817 | /* Checksum invalid? Ignore. | 817 | /* Checksum invalid? Ignore. |
818 | * We skip checking packets on the outgoing path | 818 | * We skip checking packets on the outgoing path |
819 | * because the checksum is assumed to be correct. | 819 | * because the checksum is assumed to be correct. |
@@ -870,28 +870,28 @@ static int tcp_packet(struct nf_conn *conntrack, | |||
870 | * | 870 | * |
871 | * a) SYN in ORIGINAL | 871 | * a) SYN in ORIGINAL |
872 | * b) SYN/ACK in REPLY | 872 | * b) SYN/ACK in REPLY |
873 | * c) ACK in reply direction after initial SYN in original. | 873 | * c) ACK in reply direction after initial SYN in original. |
874 | */ | 874 | */ |
875 | if (index == TCP_SYNACK_SET | 875 | if (index == TCP_SYNACK_SET |
876 | && conntrack->proto.tcp.last_index == TCP_SYN_SET | 876 | && conntrack->proto.tcp.last_index == TCP_SYN_SET |
877 | && conntrack->proto.tcp.last_dir != dir | 877 | && conntrack->proto.tcp.last_dir != dir |
878 | && ntohl(th->ack_seq) == | 878 | && ntohl(th->ack_seq) == |
879 | conntrack->proto.tcp.last_end) { | 879 | conntrack->proto.tcp.last_end) { |
880 | /* This SYN/ACK acknowledges a SYN that we earlier | 880 | /* This SYN/ACK acknowledges a SYN that we earlier |
881 | * ignored as invalid. This means that the client and | 881 | * ignored as invalid. This means that the client and |
882 | * the server are both in sync, while the firewall is | 882 | * the server are both in sync, while the firewall is |
883 | * not. We kill this session and block the SYN/ACK so | 883 | * not. We kill this session and block the SYN/ACK so |
884 | * that the client cannot but retransmit its SYN and | 884 | * that the client cannot but retransmit its SYN and |
885 | * thus initiate a clean new session. | 885 | * thus initiate a clean new session. |
886 | */ | 886 | */ |
887 | write_unlock_bh(&tcp_lock); | 887 | write_unlock_bh(&tcp_lock); |
888 | if (LOG_INVALID(IPPROTO_TCP)) | 888 | if (LOG_INVALID(IPPROTO_TCP)) |
889 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, | 889 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, |
890 | "nf_ct_tcp: killing out of sync session "); | 890 | "nf_ct_tcp: killing out of sync session "); |
891 | if (del_timer(&conntrack->timeout)) | 891 | if (del_timer(&conntrack->timeout)) |
892 | conntrack->timeout.function((unsigned long) | 892 | conntrack->timeout.function((unsigned long) |
893 | conntrack); | 893 | conntrack); |
894 | return -NF_DROP; | 894 | return -NF_DROP; |
895 | } | 895 | } |
896 | conntrack->proto.tcp.last_index = index; | 896 | conntrack->proto.tcp.last_index = index; |
897 | conntrack->proto.tcp.last_dir = dir; | 897 | conntrack->proto.tcp.last_dir = dir; |
@@ -921,13 +921,13 @@ static int tcp_packet(struct nf_conn *conntrack, | |||
921 | IP_CT_TCP_FLAG_CLOSE_INIT) | 921 | IP_CT_TCP_FLAG_CLOSE_INIT) |
922 | || after(ntohl(th->seq), | 922 | || after(ntohl(th->seq), |
923 | conntrack->proto.tcp.seen[dir].td_end)) { | 923 | conntrack->proto.tcp.seen[dir].td_end)) { |
924 | /* Attempt to reopen a closed connection. | 924 | /* Attempt to reopen a closed connection. |
925 | * Delete this connection and look up again. */ | 925 | * Delete this connection and look up again. */ |
926 | write_unlock_bh(&tcp_lock); | 926 | write_unlock_bh(&tcp_lock); |
927 | if (del_timer(&conntrack->timeout)) | 927 | if (del_timer(&conntrack->timeout)) |
928 | conntrack->timeout.function((unsigned long) | 928 | conntrack->timeout.function((unsigned long) |
929 | conntrack); | 929 | conntrack); |
930 | return -NF_REPEAT; | 930 | return -NF_REPEAT; |
931 | } else { | 931 | } else { |
932 | write_unlock_bh(&tcp_lock); | 932 | write_unlock_bh(&tcp_lock); |
933 | if (LOG_INVALID(IPPROTO_TCP)) | 933 | if (LOG_INVALID(IPPROTO_TCP)) |
@@ -938,9 +938,9 @@ static int tcp_packet(struct nf_conn *conntrack, | |||
938 | case TCP_CONNTRACK_CLOSE: | 938 | case TCP_CONNTRACK_CLOSE: |
939 | if (index == TCP_RST_SET | 939 | if (index == TCP_RST_SET |
940 | && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) | 940 | && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) |
941 | && conntrack->proto.tcp.last_index == TCP_SYN_SET) | 941 | && conntrack->proto.tcp.last_index == TCP_SYN_SET) |
942 | || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) | 942 | || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) |
943 | && conntrack->proto.tcp.last_index == TCP_ACK_SET)) | 943 | && conntrack->proto.tcp.last_index == TCP_ACK_SET)) |
944 | && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { | 944 | && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { |
945 | /* RST sent to invalid SYN or ACK we had let through | 945 | /* RST sent to invalid SYN or ACK we had let through |
946 | * at a) and c) above: | 946 | * at a) and c) above: |
@@ -1005,8 +1005,8 @@ static int tcp_packet(struct nf_conn *conntrack, | |||
1005 | && (old_state == TCP_CONNTRACK_SYN_RECV | 1005 | && (old_state == TCP_CONNTRACK_SYN_RECV |
1006 | || old_state == TCP_CONNTRACK_ESTABLISHED) | 1006 | || old_state == TCP_CONNTRACK_ESTABLISHED) |
1007 | && new_state == TCP_CONNTRACK_ESTABLISHED) { | 1007 | && new_state == TCP_CONNTRACK_ESTABLISHED) { |
1008 | /* Set ASSURED if we see a valid ack in ESTABLISHED | 1008 | /* Set ASSURED if we see a valid ack in ESTABLISHED |
1009 | after SYN_RECV or a valid answer for a picked up | 1009 | after SYN_RECV or a valid answer for a picked up |
1010 | connection. */ | 1010 | connection. */ |
1011 | set_bit(IPS_ASSURED_BIT, &conntrack->status); | 1011 | set_bit(IPS_ASSURED_BIT, &conntrack->status); |
1012 | nf_conntrack_event_cache(IPCT_STATUS, skb); | 1012 | nf_conntrack_event_cache(IPCT_STATUS, skb); |
@@ -1015,7 +1015,7 @@ static int tcp_packet(struct nf_conn *conntrack, | |||
1015 | 1015 | ||
1016 | return NF_ACCEPT; | 1016 | return NF_ACCEPT; |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | /* Called when a new connection for this protocol found. */ | 1019 | /* Called when a new connection for this protocol found. */ |
1020 | static int tcp_new(struct nf_conn *conntrack, | 1020 | static int tcp_new(struct nf_conn *conntrack, |
1021 | const struct sk_buff *skb, | 1021 | const struct sk_buff *skb, |
@@ -1071,7 +1071,7 @@ static int tcp_new(struct nf_conn *conntrack, | |||
1071 | if (conntrack->proto.tcp.seen[0].td_maxwin == 0) | 1071 | if (conntrack->proto.tcp.seen[0].td_maxwin == 0) |
1072 | conntrack->proto.tcp.seen[0].td_maxwin = 1; | 1072 | conntrack->proto.tcp.seen[0].td_maxwin = 1; |
1073 | conntrack->proto.tcp.seen[0].td_maxend = | 1073 | conntrack->proto.tcp.seen[0].td_maxend = |
1074 | conntrack->proto.tcp.seen[0].td_end + | 1074 | conntrack->proto.tcp.seen[0].td_end + |
1075 | conntrack->proto.tcp.seen[0].td_maxwin; | 1075 | conntrack->proto.tcp.seen[0].td_maxwin; |
1076 | conntrack->proto.tcp.seen[0].td_scale = 0; | 1076 | conntrack->proto.tcp.seen[0].td_scale = 0; |
1077 | 1077 | ||
@@ -1081,20 +1081,20 @@ static int tcp_new(struct nf_conn *conntrack, | |||
1081 | conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | | 1081 | conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | |
1082 | IP_CT_TCP_FLAG_BE_LIBERAL; | 1082 | IP_CT_TCP_FLAG_BE_LIBERAL; |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | conntrack->proto.tcp.seen[1].td_end = 0; | 1085 | conntrack->proto.tcp.seen[1].td_end = 0; |
1086 | conntrack->proto.tcp.seen[1].td_maxend = 0; | 1086 | conntrack->proto.tcp.seen[1].td_maxend = 0; |
1087 | conntrack->proto.tcp.seen[1].td_maxwin = 1; | 1087 | conntrack->proto.tcp.seen[1].td_maxwin = 1; |
1088 | conntrack->proto.tcp.seen[1].td_scale = 0; | 1088 | conntrack->proto.tcp.seen[1].td_scale = 0; |
1089 | 1089 | ||
1090 | /* tcp_packet will set them */ | 1090 | /* tcp_packet will set them */ |
1091 | conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; | 1091 | conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; |
1092 | conntrack->proto.tcp.last_index = TCP_NONE_SET; | 1092 | conntrack->proto.tcp.last_index = TCP_NONE_SET; |
1093 | 1093 | ||
1094 | DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " | 1094 | DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " |
1095 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 1095 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
1096 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 1096 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
1097 | sender->td_scale, | 1097 | sender->td_scale, |
1098 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 1098 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
1099 | receiver->td_scale); | 1099 | receiver->td_scale); |
1100 | return 1; | 1100 | return 1; |
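The initialisation in tcp_new() above reduces to a simple seeding rule for the originating direction; the sketch below restates it as a hypothetical helper over the ip_ct_tcp_state fields (the SACK and window-scale option parsing is left out):

    /* Sketch: seed one direction of window tracking.  td_end is the highest
     * sequence number sent, td_maxwin must never be 0, and td_maxend bounds
     * the data the peer may acknowledge. */
    static void seed_direction(struct ip_ct_tcp_state *s, u_int32_t seq,
                               u_int32_t len, u_int32_t win)
    {
            s->td_end    = seq + len;
            s->td_maxwin = win ? win : 1;
            s->td_maxend = s->td_end + s->td_maxwin;
            s->td_scale  = 0;               /* no window scale option seen */
    }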
@@ -1110,7 +1110,7 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, | |||
1110 | const struct nf_conn *ct) | 1110 | const struct nf_conn *ct) |
1111 | { | 1111 | { |
1112 | struct nfattr *nest_parms; | 1112 | struct nfattr *nest_parms; |
1113 | 1113 | ||
1114 | read_lock_bh(&tcp_lock); | 1114 | read_lock_bh(&tcp_lock); |
1115 | nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); | 1115 | nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); |
1116 | NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), | 1116 | NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), |
@@ -1140,7 +1140,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct) | |||
1140 | if (!attr) | 1140 | if (!attr) |
1141 | return 0; | 1141 | return 0; |
1142 | 1142 | ||
1143 | nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); | 1143 | nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); |
1144 | 1144 | ||
1145 | if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) | 1145 | if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) |
1146 | return -EINVAL; | 1146 | return -EINVAL; |
@@ -1149,7 +1149,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct) | |||
1149 | return -EINVAL; | 1149 | return -EINVAL; |
1150 | 1150 | ||
1151 | write_lock_bh(&tcp_lock); | 1151 | write_lock_bh(&tcp_lock); |
1152 | ct->proto.tcp.state = | 1152 | ct->proto.tcp.state = |
1153 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); | 1153 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); |
1154 | write_unlock_bh(&tcp_lock); | 1154 | write_unlock_bh(&tcp_lock); |
1155 | 1155 | ||
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 9dec11534678..7aaa8c91b293 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -341,7 +341,7 @@ int ct_sip_get_info(struct nf_conn *ct, | |||
341 | continue; | 341 | continue; |
342 | } | 342 | } |
343 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, | 343 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, |
344 | ct_sip_lnlen(dptr, limit), | 344 | ct_sip_lnlen(dptr, limit), |
345 | hnfo->case_sensitive); | 345 | hnfo->case_sensitive); |
346 | if (!aux) { | 346 | if (!aux) { |
347 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, | 347 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, |
@@ -451,12 +451,12 @@ static int sip_help(struct sk_buff **pskb, | |||
451 | 451 | ||
452 | /* We'll drop only if there are parse problems. */ | 452 | /* We'll drop only if there are parse problems. */ |
453 | if (!parse_addr(ct, dptr + matchoff, NULL, &addr, | 453 | if (!parse_addr(ct, dptr + matchoff, NULL, &addr, |
454 | dptr + datalen)) { | 454 | dptr + datalen)) { |
455 | ret = NF_DROP; | 455 | ret = NF_DROP; |
456 | goto out; | 456 | goto out; |
457 | } | 457 | } |
458 | if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen, | 458 | if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen, |
459 | POS_MEDIA) > 0) { | 459 | POS_MEDIA) > 0) { |
460 | 460 | ||
461 | port = simple_strtoul(dptr + matchoff, NULL, 10); | 461 | port = simple_strtoul(dptr + matchoff, NULL, 10); |
462 | if (port < 1024) { | 462 | if (port < 1024) { |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 04ac12431db7..a0bba481d70d 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -472,7 +472,7 @@ static int __init nf_conntrack_standalone_init(void) | |||
472 | static void __exit nf_conntrack_standalone_fini(void) | 472 | static void __exit nf_conntrack_standalone_fini(void) |
473 | { | 473 | { |
474 | #ifdef CONFIG_SYSCTL | 474 | #ifdef CONFIG_SYSCTL |
475 | unregister_sysctl_table(nf_ct_sysctl_header); | 475 | unregister_sysctl_table(nf_ct_sysctl_header); |
476 | #endif | 476 | #endif |
477 | #ifdef CONFIG_PROC_FS | 477 | #ifdef CONFIG_PROC_FS |
478 | remove_proc_entry("nf_conntrack", proc_net_stat); | 478 | remove_proc_entry("nf_conntrack", proc_net_stat); |
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c index f5bffe24b0a5..37c4542e3112 100644 --- a/net/netfilter/nf_conntrack_tftp.c +++ b/net/netfilter/nf_conntrack_tftp.c | |||
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); | |||
31 | 31 | ||
32 | #if 0 | 32 | #if 0 |
33 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ | 33 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ |
34 | __FILE__, __FUNCTION__ , ## args) | 34 | __FILE__, __FUNCTION__ , ## args) |
35 | #else | 35 | #else |
36 | #define DEBUGP(format, args...) | 36 | #define DEBUGP(format, args...) |
37 | #endif | 37 | #endif |
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index a981971ce1d5..0df7fff196a7 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h | |||
@@ -24,7 +24,7 @@ extern unsigned int nf_iterate(struct list_head *head, | |||
24 | 24 | ||
25 | /* nf_queue.c */ | 25 | /* nf_queue.c */ |
26 | extern int nf_queue(struct sk_buff *skb, | 26 | extern int nf_queue(struct sk_buff *skb, |
27 | struct list_head *elem, | 27 | struct list_head *elem, |
28 | int pf, unsigned int hook, | 28 | int pf, unsigned int hook, |
29 | struct net_device *indev, | 29 | struct net_device *indev, |
30 | struct net_device *outdev, | 30 | struct net_device *outdev, |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 07e28e089616..91b220cf5a1f 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -14,62 +14,63 @@ | |||
14 | 14 | ||
15 | #define NF_LOG_PREFIXLEN 128 | 15 | #define NF_LOG_PREFIXLEN 128 |
16 | 16 | ||
17 | static struct nf_logger *nf_logging[NPROTO]; /* = NULL */ | 17 | static struct nf_logger *nf_loggers[NPROTO]; |
18 | static DEFINE_SPINLOCK(nf_log_lock); | 18 | static DEFINE_MUTEX(nf_log_mutex); |
19 | 19 | ||
20 | /* return EBUSY if somebody else is registered, EEXIST if the same logger | 20 | /* return EBUSY if somebody else is registered, EEXIST if the same logger |
21 | * is registered, 0 on success. */ | 21 | * is registered, 0 on success. */ |
22 | int nf_log_register(int pf, struct nf_logger *logger) | 22 | int nf_log_register(int pf, struct nf_logger *logger) |
23 | { | 23 | { |
24 | int ret = -EBUSY; | 24 | int ret; |
25 | 25 | ||
26 | if (pf >= NPROTO) | 26 | if (pf >= NPROTO) |
27 | return -EINVAL; | 27 | return -EINVAL; |
28 | 28 | ||
29 | /* Any setup of logging members must be done before | 29 | /* Any setup of logging members must be done before |
30 | * substituting pointer. */ | 30 | * substituting pointer. */ |
31 | spin_lock(&nf_log_lock); | 31 | ret = mutex_lock_interruptible(&nf_log_mutex); |
32 | if (!nf_logging[pf]) { | 32 | if (ret < 0) |
33 | rcu_assign_pointer(nf_logging[pf], logger); | 33 | return ret; |
34 | ret = 0; | 34 | |
35 | } else if (nf_logging[pf] == logger) | 35 | if (!nf_loggers[pf]) |
36 | rcu_assign_pointer(nf_loggers[pf], logger); | ||
37 | else if (nf_loggers[pf] == logger) | ||
36 | ret = -EEXIST; | 38 | ret = -EEXIST; |
39 | else | ||
40 | ret = -EBUSY; | ||
37 | 41 | ||
38 | spin_unlock(&nf_log_lock); | 42 | mutex_unlock(&nf_log_mutex); |
39 | return ret; | 43 | return ret; |
40 | } | 44 | } |
41 | EXPORT_SYMBOL(nf_log_register); | 45 | EXPORT_SYMBOL(nf_log_register); |
42 | 46 | ||
43 | int nf_log_unregister_pf(int pf) | 47 | void nf_log_unregister_pf(int pf) |
44 | { | 48 | { |
45 | if (pf >= NPROTO) | 49 | if (pf >= NPROTO) |
46 | return -EINVAL; | 50 | return; |
47 | 51 | mutex_lock(&nf_log_mutex); | |
48 | spin_lock(&nf_log_lock); | 52 | rcu_assign_pointer(nf_loggers[pf], NULL); |
49 | nf_logging[pf] = NULL; | 53 | mutex_unlock(&nf_log_mutex); |
50 | spin_unlock(&nf_log_lock); | ||
51 | 54 | ||
52 | /* Give time to concurrent readers. */ | 55 | /* Give time to concurrent readers. */ |
53 | synchronize_net(); | 56 | synchronize_rcu(); |
54 | |||
55 | return 0; | ||
56 | } | 57 | } |
57 | EXPORT_SYMBOL(nf_log_unregister_pf); | 58 | EXPORT_SYMBOL(nf_log_unregister_pf); |
58 | 59 | ||
59 | void nf_log_unregister_logger(struct nf_logger *logger) | 60 | void nf_log_unregister(struct nf_logger *logger) |
60 | { | 61 | { |
61 | int i; | 62 | int i; |
62 | 63 | ||
63 | spin_lock(&nf_log_lock); | 64 | mutex_lock(&nf_log_mutex); |
64 | for (i = 0; i < NPROTO; i++) { | 65 | for (i = 0; i < NPROTO; i++) { |
65 | if (nf_logging[i] == logger) | 66 | if (nf_loggers[i] == logger) |
66 | nf_logging[i] = NULL; | 67 | rcu_assign_pointer(nf_loggers[i], NULL); |
67 | } | 68 | } |
68 | spin_unlock(&nf_log_lock); | 69 | mutex_unlock(&nf_log_mutex); |
69 | 70 | ||
70 | synchronize_net(); | 71 | synchronize_rcu(); |
71 | } | 72 | } |
72 | EXPORT_SYMBOL(nf_log_unregister_logger); | 73 | EXPORT_SYMBOL(nf_log_unregister); |
73 | 74 | ||
74 | void nf_log_packet(int pf, | 75 | void nf_log_packet(int pf, |
75 | unsigned int hooknum, | 76 | unsigned int hooknum, |
@@ -82,9 +83,9 @@ void nf_log_packet(int pf, | |||
82 | va_list args; | 83 | va_list args; |
83 | char prefix[NF_LOG_PREFIXLEN]; | 84 | char prefix[NF_LOG_PREFIXLEN]; |
84 | struct nf_logger *logger; | 85 | struct nf_logger *logger; |
85 | 86 | ||
86 | rcu_read_lock(); | 87 | rcu_read_lock(); |
87 | logger = rcu_dereference(nf_logging[pf]); | 88 | logger = rcu_dereference(nf_loggers[pf]); |
88 | if (logger) { | 89 | if (logger) { |
89 | va_start(args, fmt); | 90 | va_start(args, fmt); |
90 | vsnprintf(prefix, sizeof(prefix), fmt, args); | 91 | vsnprintf(prefix, sizeof(prefix), fmt, args); |
@@ -131,11 +132,11 @@ static int seq_show(struct seq_file *s, void *v) | |||
131 | loff_t *pos = v; | 132 | loff_t *pos = v; |
132 | const struct nf_logger *logger; | 133 | const struct nf_logger *logger; |
133 | 134 | ||
134 | logger = rcu_dereference(nf_logging[*pos]); | 135 | logger = rcu_dereference(nf_loggers[*pos]); |
135 | 136 | ||
136 | if (!logger) | 137 | if (!logger) |
137 | return seq_printf(s, "%2lld NONE\n", *pos); | 138 | return seq_printf(s, "%2lld NONE\n", *pos); |
138 | 139 | ||
139 | return seq_printf(s, "%2lld %s\n", *pos, logger->name); | 140 | return seq_printf(s, "%2lld %s\n", *pos, logger->name); |
140 | } | 141 | } |
141 | 142 | ||
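Taken together, the nf_log.c hunks above switch the registration path from a spinlock to a mutex, rename nf_logging to nf_loggers, publish entries with rcu_assign_pointer(), and turn unregistration into a void operation. A minimal sketch of how a backend would use the reworked interface (hypothetical module; the nf_logger fields and callback shape are assumed from their use in seq_show() and nfulnl_log_packet() elsewhere in this patch):

    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <linux/skbuff.h>

    /* Assumed callback shape; the body is omitted. */
    static void my_logfn(unsigned int pf, unsigned int hooknum,
                         const struct sk_buff *skb,
                         const struct net_device *in,
                         const struct net_device *out,
                         const struct nf_loginfo *li,
                         const char *prefix)
    {
            /* hand the packet metadata to whatever backend we implement */
    }

    static struct nf_logger my_logger = {
            .name  = "my_logger",
            .logfn = my_logfn,
            .me    = THIS_MODULE,
    };

    static int __init my_init(void)
    {
            /* -EEXIST: this logger is already bound to the family;
             * -EBUSY: a different logger owns it (see the comment at the
             * top of nf_log.c). */
            return nf_log_register(PF_INET, &my_logger);
    }

    static void __exit my_exit(void)
    {
            /* Drops the logger from every family and calls synchronize_rcu()
             * so that concurrent nf_log_packet() callers finish first. */
            nf_log_unregister(&my_logger);
    }

    module_init(my_init);
    module_exit(my_exit);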
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index e136fea1db22..b1f2ace96f6d 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #include "nf_internals.h" | 11 | #include "nf_internals.h" |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * A queue handler may be registered for each protocol. Each is protected by | 14 | * A queue handler may be registered for each protocol. Each is protected by |
15 | * a long-term mutex. The handler must provide an outfn() to accept packets | 15 | * a long-term mutex. The handler must provide an outfn() to accept packets |
16 | * for queueing and must reinject all packets it receives, no matter what. | 16 | * for queueing and must reinject all packets it receives, no matter what. |
@@ -22,7 +22,7 @@ static DEFINE_RWLOCK(queue_handler_lock); | |||
22 | /* return EBUSY when somebody else is registered, return EEXIST if the | 22 | /* return EBUSY when somebody else is registered, return EEXIST if the |
23 | * same handler is registered, return 0 in case of success. */ | 23 | * same handler is registered, return 0 in case of success. */ |
24 | int nf_register_queue_handler(int pf, struct nf_queue_handler *qh) | 24 | int nf_register_queue_handler(int pf, struct nf_queue_handler *qh) |
25 | { | 25 | { |
26 | int ret; | 26 | int ret; |
27 | 27 | ||
28 | if (pf >= NPROTO) | 28 | if (pf >= NPROTO) |
@@ -52,7 +52,7 @@ int nf_unregister_queue_handler(int pf) | |||
52 | write_lock_bh(&queue_handler_lock); | 52 | write_lock_bh(&queue_handler_lock); |
53 | queue_handler[pf] = NULL; | 53 | queue_handler[pf] = NULL; |
54 | write_unlock_bh(&queue_handler_lock); | 54 | write_unlock_bh(&queue_handler_lock); |
55 | 55 | ||
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
58 | EXPORT_SYMBOL(nf_unregister_queue_handler); | 58 | EXPORT_SYMBOL(nf_unregister_queue_handler); |
@@ -70,8 +70,8 @@ void nf_unregister_queue_handlers(struct nf_queue_handler *qh) | |||
70 | } | 70 | } |
71 | EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); | 71 | EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * Any packet that leaves via this function must come back | 74 | * Any packet that leaves via this function must come back |
75 | * through nf_reinject(). | 75 | * through nf_reinject(). |
76 | */ | 76 | */ |
77 | static int __nf_queue(struct sk_buff *skb, | 77 | static int __nf_queue(struct sk_buff *skb, |
@@ -115,7 +115,7 @@ static int __nf_queue(struct sk_buff *skb, | |||
115 | return 1; | 115 | return 1; |
116 | } | 116 | } |
117 | 117 | ||
118 | *info = (struct nf_info) { | 118 | *info = (struct nf_info) { |
119 | (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn }; | 119 | (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn }; |
120 | 120 | ||
121 | /* If it's going away, ignore hook. */ | 121 | /* If it's going away, ignore hook. */ |
@@ -226,10 +226,10 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, | |||
226 | module_put(info->elem->owner); | 226 | module_put(info->elem->owner); |
227 | 227 | ||
228 | list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) { | 228 | list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) { |
229 | if (i == elem) | 229 | if (i == elem) |
230 | break; | 230 | break; |
231 | } | 231 | } |
232 | 232 | ||
233 | if (i == &nf_hooks[info->pf][info->hook]) { | 233 | if (i == &nf_hooks[info->pf][info->hook]) { |
234 | /* The module which sent it to userspace is gone. */ | 234 | /* The module which sent it to userspace is gone. */ |
235 | NFDEBUG("%s: module disappeared, dropping packet.\n", | 235 | NFDEBUG("%s: module disappeared, dropping packet.\n", |
@@ -252,7 +252,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, | |||
252 | if (verdict == NF_ACCEPT) { | 252 | if (verdict == NF_ACCEPT) { |
253 | next_hook: | 253 | next_hook: |
254 | verdict = nf_iterate(&nf_hooks[info->pf][info->hook], | 254 | verdict = nf_iterate(&nf_hooks[info->pf][info->hook], |
255 | &skb, info->hook, | 255 | &skb, info->hook, |
256 | info->indev, info->outdev, &elem, | 256 | info->indev, info->outdev, &elem, |
257 | info->okfn, INT_MIN); | 257 | info->okfn, INT_MIN); |
258 | } | 258 | } |
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index c2e44e90e437..8b8ece750313 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c | |||
@@ -32,13 +32,13 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg) | |||
32 | list_for_each(i, &nf_sockopts) { | 32 | list_for_each(i, &nf_sockopts) { |
33 | struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i; | 33 | struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i; |
34 | if (ops->pf == reg->pf | 34 | if (ops->pf == reg->pf |
35 | && (overlap(ops->set_optmin, ops->set_optmax, | 35 | && (overlap(ops->set_optmin, ops->set_optmax, |
36 | reg->set_optmin, reg->set_optmax) | 36 | reg->set_optmin, reg->set_optmax) |
37 | || overlap(ops->get_optmin, ops->get_optmax, | 37 | || overlap(ops->get_optmin, ops->get_optmax, |
38 | reg->get_optmin, reg->get_optmax))) { | 38 | reg->get_optmin, reg->get_optmax))) { |
39 | NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n", | 39 | NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n", |
40 | ops->set_optmin, ops->set_optmax, | 40 | ops->set_optmin, ops->set_optmax, |
41 | ops->get_optmin, ops->get_optmax, | 41 | ops->get_optmin, ops->get_optmax, |
42 | reg->set_optmin, reg->set_optmax, | 42 | reg->set_optmin, reg->set_optmax, |
43 | reg->get_optmin, reg->get_optmax); | 43 | reg->get_optmin, reg->get_optmax); |
44 | ret = -EBUSY; | 44 | ret = -EBUSY; |
@@ -73,7 +73,7 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg) | |||
73 | EXPORT_SYMBOL(nf_unregister_sockopt); | 73 | EXPORT_SYMBOL(nf_unregister_sockopt); |
74 | 74 | ||
75 | /* Call get/setsockopt() */ | 75 | /* Call get/setsockopt() */ |
76 | static int nf_sockopt(struct sock *sk, int pf, int val, | 76 | static int nf_sockopt(struct sock *sk, int pf, int val, |
77 | char __user *opt, int *len, int get) | 77 | char __user *opt, int *len, int get) |
78 | { | 78 | { |
79 | struct list_head *i; | 79 | struct list_head *i; |
@@ -107,7 +107,7 @@ static int nf_sockopt(struct sock *sk, int pf, int val, | |||
107 | } | 107 | } |
108 | mutex_unlock(&nf_sockopt_mutex); | 108 | mutex_unlock(&nf_sockopt_mutex); |
109 | return -ENOPROTOOPT; | 109 | return -ENOPROTOOPT; |
110 | 110 | ||
111 | out: | 111 | out: |
112 | mutex_lock(&nf_sockopt_mutex); | 112 | mutex_lock(&nf_sockopt_mutex); |
113 | ops->use--; | 113 | ops->use--; |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 52fdfa2686c9..f42bb1366007 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -105,7 +105,7 @@ static inline struct nfnl_callback * | |||
105 | nfnetlink_find_client(u_int16_t type, struct nfnetlink_subsystem *ss) | 105 | nfnetlink_find_client(u_int16_t type, struct nfnetlink_subsystem *ss) |
106 | { | 106 | { |
107 | u_int8_t cb_id = NFNL_MSG_TYPE(type); | 107 | u_int8_t cb_id = NFNL_MSG_TYPE(type); |
108 | 108 | ||
109 | if (cb_id >= ss->cb_count) { | 109 | if (cb_id >= ss->cb_count) { |
110 | DEBUGP("msgtype %u >= %u, returning\n", type, ss->cb_count); | 110 | DEBUGP("msgtype %u >= %u, returning\n", type, ss->cb_count); |
111 | return NULL; | 111 | return NULL; |
@@ -187,7 +187,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, | |||
187 | /* implicit: if nlmsg_len == min_len, we return 0, and an empty | 187 | /* implicit: if nlmsg_len == min_len, we return 0, and an empty |
188 | * (zeroed) cda[] array. The message is valid, but empty. */ | 188 | * (zeroed) cda[] array. The message is valid, but empty. */ |
189 | 189 | ||
190 | return 0; | 190 | return 0; |
191 | } | 191 | } |
192 | 192 | ||
193 | int nfnetlink_has_listeners(unsigned int group) | 193 | int nfnetlink_has_listeners(unsigned int group) |
@@ -268,12 +268,12 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, | |||
268 | } | 268 | } |
269 | 269 | ||
270 | { | 270 | { |
271 | u_int16_t attr_count = | 271 | u_int16_t attr_count = |
272 | ss->cb[NFNL_MSG_TYPE(nlh->nlmsg_type)].attr_count; | 272 | ss->cb[NFNL_MSG_TYPE(nlh->nlmsg_type)].attr_count; |
273 | struct nfattr *cda[attr_count]; | 273 | struct nfattr *cda[attr_count]; |
274 | 274 | ||
275 | memset(cda, 0, sizeof(struct nfattr *) * attr_count); | 275 | memset(cda, 0, sizeof(struct nfattr *) * attr_count); |
276 | 276 | ||
277 | err = nfnetlink_check_attributes(ss, nlh, cda); | 277 | err = nfnetlink_check_attributes(ss, nlh, cda); |
278 | if (err < 0) | 278 | if (err < 0) |
279 | goto err_inval; | 279 | goto err_inval; |
@@ -357,7 +357,7 @@ static int __init nfnetlink_init(void) | |||
357 | printk("Netfilter messages via NETLINK v%s.\n", nfversion); | 357 | printk("Netfilter messages via NETLINK v%s.\n", nfversion); |
358 | 358 | ||
359 | nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX, | 359 | nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX, |
360 | nfnetlink_rcv, THIS_MODULE); | 360 | nfnetlink_rcv, THIS_MODULE); |
361 | if (!nfnl) { | 361 | if (!nfnl) { |
362 | printk(KERN_ERR "cannot initialize nfnetlink!\n"); | 362 | printk(KERN_ERR "cannot initialize nfnetlink!\n"); |
363 | return -1; | 363 | return -1; |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index c47e7e2ba642..b8eab0dbc3dd 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -75,7 +75,7 @@ struct nfulnl_instance { | |||
75 | u_int32_t seq; /* instance-local sequential counter */ | 75 | u_int32_t seq; /* instance-local sequential counter */ |
76 | u_int16_t group_num; /* number of this queue */ | 76 | u_int16_t group_num; /* number of this queue */ |
77 | u_int16_t flags; | 77 | u_int16_t flags; |
78 | u_int8_t copy_mode; | 78 | u_int8_t copy_mode; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static DEFINE_RWLOCK(instances_lock); | 81 | static DEFINE_RWLOCK(instances_lock); |
@@ -146,7 +146,7 @@ instance_create(u_int16_t group_num, int pid) | |||
146 | UDEBUG("entering (group_num=%u, pid=%d)\n", group_num, | 146 | UDEBUG("entering (group_num=%u, pid=%d)\n", group_num, |
147 | pid); | 147 | pid); |
148 | 148 | ||
149 | write_lock_bh(&instances_lock); | 149 | write_lock_bh(&instances_lock); |
150 | if (__instance_lookup(group_num)) { | 150 | if (__instance_lookup(group_num)) { |
151 | inst = NULL; | 151 | inst = NULL; |
152 | UDEBUG("aborting, instance already exists\n"); | 152 | UDEBUG("aborting, instance already exists\n"); |
@@ -179,10 +179,10 @@ instance_create(u_int16_t group_num, int pid) | |||
179 | if (!try_module_get(THIS_MODULE)) | 179 | if (!try_module_get(THIS_MODULE)) |
180 | goto out_free; | 180 | goto out_free; |
181 | 181 | ||
182 | hlist_add_head(&inst->hlist, | 182 | hlist_add_head(&inst->hlist, |
183 | &instance_table[instance_hashfn(group_num)]); | 183 | &instance_table[instance_hashfn(group_num)]); |
184 | 184 | ||
185 | UDEBUG("newly added node: %p, next=%p\n", &inst->hlist, | 185 | UDEBUG("newly added node: %p, next=%p\n", &inst->hlist, |
186 | inst->hlist.next); | 186 | inst->hlist.next); |
187 | 187 | ||
188 | write_unlock_bh(&instances_lock); | 188 | write_unlock_bh(&instances_lock); |
@@ -251,14 +251,14 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, | |||
251 | int status = 0; | 251 | int status = 0; |
252 | 252 | ||
253 | spin_lock_bh(&inst->lock); | 253 | spin_lock_bh(&inst->lock); |
254 | 254 | ||
255 | switch (mode) { | 255 | switch (mode) { |
256 | case NFULNL_COPY_NONE: | 256 | case NFULNL_COPY_NONE: |
257 | case NFULNL_COPY_META: | 257 | case NFULNL_COPY_META: |
258 | inst->copy_mode = mode; | 258 | inst->copy_mode = mode; |
259 | inst->copy_range = 0; | 259 | inst->copy_range = 0; |
260 | break; | 260 | break; |
261 | 261 | ||
262 | case NFULNL_COPY_PACKET: | 262 | case NFULNL_COPY_PACKET: |
263 | inst->copy_mode = mode; | 263 | inst->copy_mode = mode; |
264 | /* we're using struct nfattr which has 16bit nfa_len */ | 264 | /* we're using struct nfattr which has 16bit nfa_len */ |
@@ -267,7 +267,7 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, | |||
267 | else | 267 | else |
268 | inst->copy_range = range; | 268 | inst->copy_range = range; |
269 | break; | 269 | break; |
270 | 270 | ||
271 | default: | 271 | default: |
272 | status = -EINVAL; | 272 | status = -EINVAL; |
273 | break; | 273 | break; |
@@ -327,7 +327,7 @@ nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) | |||
327 | return 0; | 327 | return 0; |
328 | } | 328 | } |
329 | 329 | ||
330 | static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size, | 330 | static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size, |
331 | unsigned int pkt_size) | 331 | unsigned int pkt_size) |
332 | { | 332 | { |
333 | struct sk_buff *skb; | 333 | struct sk_buff *skb; |
@@ -387,7 +387,7 @@ __nfulnl_send(struct nfulnl_instance *inst) | |||
387 | 387 | ||
388 | static void nfulnl_timer(unsigned long data) | 388 | static void nfulnl_timer(unsigned long data) |
389 | { | 389 | { |
390 | struct nfulnl_instance *inst = (struct nfulnl_instance *)data; | 390 | struct nfulnl_instance *inst = (struct nfulnl_instance *)data; |
391 | 391 | ||
392 | UDEBUG("timer function called, flushing buffer\n"); | 392 | UDEBUG("timer function called, flushing buffer\n"); |
393 | 393 | ||
@@ -399,9 +399,9 @@ static void nfulnl_timer(unsigned long data) | |||
399 | 399 | ||
400 | /* This is an inline function, we don't really care about a long | 400 | /* This is an inline function, we don't really care about a long |
401 | * list of arguments */ | 401 | * list of arguments */ |
402 | static inline int | 402 | static inline int |
403 | __build_packet_message(struct nfulnl_instance *inst, | 403 | __build_packet_message(struct nfulnl_instance *inst, |
404 | const struct sk_buff *skb, | 404 | const struct sk_buff *skb, |
405 | unsigned int data_len, | 405 | unsigned int data_len, |
406 | unsigned int pf, | 406 | unsigned int pf, |
407 | unsigned int hooknum, | 407 | unsigned int hooknum, |
@@ -417,9 +417,9 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
417 | __be32 tmp_uint; | 417 | __be32 tmp_uint; |
418 | 418 | ||
419 | UDEBUG("entered\n"); | 419 | UDEBUG("entered\n"); |
420 | 420 | ||
421 | old_tail = inst->skb->tail; | 421 | old_tail = inst->skb->tail; |
422 | nlh = NLMSG_PUT(inst->skb, 0, 0, | 422 | nlh = NLMSG_PUT(inst->skb, 0, 0, |
423 | NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, | 423 | NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, |
424 | sizeof(struct nfgenmsg)); | 424 | sizeof(struct nfgenmsg)); |
425 | nfmsg = NLMSG_DATA(nlh); | 425 | nfmsg = NLMSG_DATA(nlh); |
@@ -457,7 +457,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
457 | NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, | 457 | NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, |
458 | sizeof(tmp_uint), &tmp_uint); | 458 | sizeof(tmp_uint), &tmp_uint); |
459 | if (skb->nf_bridge && skb->nf_bridge->physindev) { | 459 | if (skb->nf_bridge && skb->nf_bridge->physindev) { |
460 | tmp_uint = | 460 | tmp_uint = |
461 | htonl(skb->nf_bridge->physindev->ifindex); | 461 | htonl(skb->nf_bridge->physindev->ifindex); |
462 | NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV, | 462 | NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV, |
463 | sizeof(tmp_uint), &tmp_uint); | 463 | sizeof(tmp_uint), &tmp_uint); |
@@ -488,7 +488,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
488 | NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, | 488 | NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, |
489 | sizeof(tmp_uint), &tmp_uint); | 489 | sizeof(tmp_uint), &tmp_uint); |
490 | if (skb->nf_bridge) { | 490 | if (skb->nf_bridge) { |
491 | tmp_uint = | 491 | tmp_uint = |
492 | htonl(skb->nf_bridge->physoutdev->ifindex); | 492 | htonl(skb->nf_bridge->physoutdev->ifindex); |
493 | NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, | 493 | NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, |
494 | sizeof(tmp_uint), &tmp_uint); | 494 | sizeof(tmp_uint), &tmp_uint); |
@@ -558,7 +558,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
558 | if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len)) | 558 | if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len)) |
559 | BUG(); | 559 | BUG(); |
560 | } | 560 | } |
561 | 561 | ||
562 | nlh->nlmsg_len = inst->skb->tail - old_tail; | 562 | nlh->nlmsg_len = inst->skb->tail - old_tail; |
563 | return 0; | 563 | return 0; |
564 | 564 | ||
@@ -599,7 +599,7 @@ nfulnl_log_packet(unsigned int pf, | |||
599 | unsigned int nlbufsiz; | 599 | unsigned int nlbufsiz; |
600 | unsigned int plen; | 600 | unsigned int plen; |
601 | 601 | ||
602 | if (li_user && li_user->type == NF_LOG_TYPE_ULOG) | 602 | if (li_user && li_user->type == NF_LOG_TYPE_ULOG) |
603 | li = li_user; | 603 | li = li_user; |
604 | else | 604 | else |
605 | li = &default_loginfo; | 605 | li = &default_loginfo; |
@@ -648,24 +648,24 @@ nfulnl_log_packet(unsigned int pf, | |||
648 | /* per-rule qthreshold overrides per-instance */ | 648 | /* per-rule qthreshold overrides per-instance */ |
649 | if (qthreshold > li->u.ulog.qthreshold) | 649 | if (qthreshold > li->u.ulog.qthreshold) |
650 | qthreshold = li->u.ulog.qthreshold; | 650 | qthreshold = li->u.ulog.qthreshold; |
651 | 651 | ||
652 | switch (inst->copy_mode) { | 652 | switch (inst->copy_mode) { |
653 | case NFULNL_COPY_META: | 653 | case NFULNL_COPY_META: |
654 | case NFULNL_COPY_NONE: | 654 | case NFULNL_COPY_NONE: |
655 | data_len = 0; | 655 | data_len = 0; |
656 | break; | 656 | break; |
657 | 657 | ||
658 | case NFULNL_COPY_PACKET: | 658 | case NFULNL_COPY_PACKET: |
659 | if (inst->copy_range == 0 | 659 | if (inst->copy_range == 0 |
660 | || inst->copy_range > skb->len) | 660 | || inst->copy_range > skb->len) |
661 | data_len = skb->len; | 661 | data_len = skb->len; |
662 | else | 662 | else |
663 | data_len = inst->copy_range; | 663 | data_len = inst->copy_range; |
664 | 664 | ||
665 | size += NFA_SPACE(data_len); | 665 | size += NFA_SPACE(data_len); |
666 | UDEBUG("copy_packet, therefore size now %u\n", size); | 666 | UDEBUG("copy_packet, therefore size now %u\n", size); |
667 | break; | 667 | break; |
668 | 668 | ||
669 | default: | 669 | default: |
670 | spin_unlock_bh(&inst->lock); | 670 | spin_unlock_bh(&inst->lock); |
671 | instance_put(inst); | 671 | instance_put(inst); |
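Ignoring the 16-bit nfa_len clamp applied in nfulnl_set_mode(), the copy-mode handling in the hunks above boils down to one rule for how much packet payload ends up in the netlink message; a hypothetical restatement, not the kernel code itself:

    /* Sketch of the payload-copy rule: COPY_NONE and COPY_META carry no
     * packet data; in COPY_PACKET mode a copy_range of 0 means "whole
     * packet", otherwise the copy is clamped to the configured range. */
    static unsigned int nful_copy_len(u_int8_t copy_mode, unsigned int range,
                                      unsigned int pktlen)
    {
            if (copy_mode != NFULNL_COPY_PACKET)
                    return 0;
            if (range == 0 || range > pktlen)
                    return pktlen;
            return range;
    }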
@@ -991,9 +991,9 @@ static int seq_show(struct seq_file *s, void *v) | |||
991 | { | 991 | { |
992 | const struct nfulnl_instance *inst = v; | 992 | const struct nfulnl_instance *inst = v; |
993 | 993 | ||
994 | return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n", | 994 | return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n", |
995 | inst->group_num, | 995 | inst->group_num, |
996 | inst->peer_pid, inst->qlen, | 996 | inst->peer_pid, inst->qlen, |
997 | inst->copy_mode, inst->copy_range, | 997 | inst->copy_mode, inst->copy_range, |
998 | inst->flushtimeout, atomic_read(&inst->use)); | 998 | inst->flushtimeout, atomic_read(&inst->use)); |
999 | } | 999 | } |
@@ -1041,10 +1041,10 @@ static int __init nfnetlink_log_init(void) | |||
1041 | #ifdef CONFIG_PROC_FS | 1041 | #ifdef CONFIG_PROC_FS |
1042 | struct proc_dir_entry *proc_nful; | 1042 | struct proc_dir_entry *proc_nful; |
1043 | #endif | 1043 | #endif |
1044 | 1044 | ||
1045 | for (i = 0; i < INSTANCE_BUCKETS; i++) | 1045 | for (i = 0; i < INSTANCE_BUCKETS; i++) |
1046 | INIT_HLIST_HEAD(&instance_table[i]); | 1046 | INIT_HLIST_HEAD(&instance_table[i]); |
1047 | 1047 | ||
1048 | /* it's not really all that important to have a random value, so | 1048 | /* it's not really all that important to have a random value, so |
1049 | * we can do this from the init function, even if there hasn't | 1049 | * we can do this from the init function, even if there hasn't |
1050 | * been that much entropy yet */ | 1050 | * been that much entropy yet */ |
@@ -1077,7 +1077,7 @@ cleanup_netlink_notifier: | |||
1077 | 1077 | ||
1078 | static void __exit nfnetlink_log_fini(void) | 1078 | static void __exit nfnetlink_log_fini(void) |
1079 | { | 1079 | { |
1080 | nf_log_unregister_logger(&nfulnl_logger); | 1080 | nf_log_unregister(&nfulnl_logger); |
1081 | #ifdef CONFIG_PROC_FS | 1081 | #ifdef CONFIG_PROC_FS |
1082 | remove_proc_entry("nfnetlink_log", proc_net_netfilter); | 1082 | remove_proc_entry("nfnetlink_log", proc_net_netfilter); |
1083 | #endif | 1083 | #endif |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 99e516eca41a..d9ce4a71d0f3 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -129,7 +129,7 @@ instance_create(u_int16_t queue_num, int pid) | |||
129 | 129 | ||
130 | QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid); | 130 | QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid); |
131 | 131 | ||
132 | write_lock_bh(&instances_lock); | 132 | write_lock_bh(&instances_lock); |
133 | if (__instance_lookup(queue_num)) { | 133 | if (__instance_lookup(queue_num)) { |
134 | inst = NULL; | 134 | inst = NULL; |
135 | QDEBUG("aborting, instance already exists\n"); | 135 | QDEBUG("aborting, instance already exists\n"); |
@@ -154,7 +154,7 @@ instance_create(u_int16_t queue_num, int pid) | |||
154 | if (!try_module_get(THIS_MODULE)) | 154 | if (!try_module_get(THIS_MODULE)) |
155 | goto out_free; | 155 | goto out_free; |
156 | 156 | ||
157 | hlist_add_head(&inst->hlist, | 157 | hlist_add_head(&inst->hlist, |
158 | &instance_table[instance_hashfn(queue_num)]); | 158 | &instance_table[instance_hashfn(queue_num)]); |
159 | 159 | ||
160 | write_unlock_bh(&instances_lock); | 160 | write_unlock_bh(&instances_lock); |
@@ -239,14 +239,14 @@ __enqueue_entry(struct nfqnl_instance *queue, | |||
239 | * entry if cmpfn is NULL. | 239 | * entry if cmpfn is NULL. |
240 | */ | 240 | */ |
241 | static inline struct nfqnl_queue_entry * | 241 | static inline struct nfqnl_queue_entry * |
242 | __find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, | 242 | __find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, |
243 | unsigned long data) | 243 | unsigned long data) |
244 | { | 244 | { |
245 | struct list_head *p; | 245 | struct list_head *p; |
246 | 246 | ||
247 | list_for_each_prev(p, &queue->queue_list) { | 247 | list_for_each_prev(p, &queue->queue_list) { |
248 | struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p; | 248 | struct nfqnl_queue_entry *entry = (struct nfqnl_queue_entry *)p; |
249 | 249 | ||
250 | if (!cmpfn || cmpfn(entry, data)) | 250 | if (!cmpfn || cmpfn(entry, data)) |
251 | return entry; | 251 | return entry; |
252 | } | 252 | } |
@@ -279,7 +279,7 @@ static inline void | |||
279 | __nfqnl_flush(struct nfqnl_instance *queue, int verdict) | 279 | __nfqnl_flush(struct nfqnl_instance *queue, int verdict) |
280 | { | 280 | { |
281 | struct nfqnl_queue_entry *entry; | 281 | struct nfqnl_queue_entry *entry; |
282 | 282 | ||
283 | while ((entry = __find_dequeue_entry(queue, NULL, 0))) | 283 | while ((entry = __find_dequeue_entry(queue, NULL, 0))) |
284 | issue_verdict(entry, verdict); | 284 | issue_verdict(entry, verdict); |
285 | } | 285 | } |
@@ -289,14 +289,14 @@ __nfqnl_set_mode(struct nfqnl_instance *queue, | |||
289 | unsigned char mode, unsigned int range) | 289 | unsigned char mode, unsigned int range) |
290 | { | 290 | { |
291 | int status = 0; | 291 | int status = 0; |
292 | 292 | ||
293 | switch (mode) { | 293 | switch (mode) { |
294 | case NFQNL_COPY_NONE: | 294 | case NFQNL_COPY_NONE: |
295 | case NFQNL_COPY_META: | 295 | case NFQNL_COPY_META: |
296 | queue->copy_mode = mode; | 296 | queue->copy_mode = mode; |
297 | queue->copy_range = 0; | 297 | queue->copy_range = 0; |
298 | break; | 298 | break; |
299 | 299 | ||
300 | case NFQNL_COPY_PACKET: | 300 | case NFQNL_COPY_PACKET: |
301 | queue->copy_mode = mode; | 301 | queue->copy_mode = mode; |
302 | /* we're using struct nfattr which has 16bit nfa_len */ | 302 | /* we're using struct nfattr which has 16bit nfa_len */ |
@@ -305,7 +305,7 @@ __nfqnl_set_mode(struct nfqnl_instance *queue, | |||
305 | else | 305 | else |
306 | queue->copy_range = range; | 306 | queue->copy_range = range; |
307 | break; | 307 | break; |
308 | 308 | ||
309 | default: | 309 | default: |
310 | status = -EINVAL; | 310 | status = -EINVAL; |
311 | 311 | ||
@@ -318,7 +318,7 @@ find_dequeue_entry(struct nfqnl_instance *queue, | |||
318 | nfqnl_cmpfn cmpfn, unsigned long data) | 318 | nfqnl_cmpfn cmpfn, unsigned long data) |
319 | { | 319 | { |
320 | struct nfqnl_queue_entry *entry; | 320 | struct nfqnl_queue_entry *entry; |
321 | 321 | ||
322 | spin_lock_bh(&queue->lock); | 322 | spin_lock_bh(&queue->lock); |
323 | entry = __find_dequeue_entry(queue, cmpfn, data); | 323 | entry = __find_dequeue_entry(queue, cmpfn, data); |
324 | spin_unlock_bh(&queue->lock); | 324 | spin_unlock_bh(&queue->lock); |
@@ -369,13 +369,13 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
369 | outdev = entinf->outdev; | 369 | outdev = entinf->outdev; |
370 | 370 | ||
371 | spin_lock_bh(&queue->lock); | 371 | spin_lock_bh(&queue->lock); |
372 | 372 | ||
373 | switch (queue->copy_mode) { | 373 | switch (queue->copy_mode) { |
374 | case NFQNL_COPY_META: | 374 | case NFQNL_COPY_META: |
375 | case NFQNL_COPY_NONE: | 375 | case NFQNL_COPY_NONE: |
376 | data_len = 0; | 376 | data_len = 0; |
377 | break; | 377 | break; |
378 | 378 | ||
379 | case NFQNL_COPY_PACKET: | 379 | case NFQNL_COPY_PACKET: |
380 | if ((entskb->ip_summed == CHECKSUM_PARTIAL || | 380 | if ((entskb->ip_summed == CHECKSUM_PARTIAL || |
381 | entskb->ip_summed == CHECKSUM_COMPLETE) && | 381 | entskb->ip_summed == CHECKSUM_COMPLETE) && |
@@ -383,15 +383,15 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
383 | spin_unlock_bh(&queue->lock); | 383 | spin_unlock_bh(&queue->lock); |
384 | return NULL; | 384 | return NULL; |
385 | } | 385 | } |
386 | if (queue->copy_range == 0 | 386 | if (queue->copy_range == 0 |
387 | || queue->copy_range > entskb->len) | 387 | || queue->copy_range > entskb->len) |
388 | data_len = entskb->len; | 388 | data_len = entskb->len; |
389 | else | 389 | else |
390 | data_len = queue->copy_range; | 390 | data_len = queue->copy_range; |
391 | 391 | ||
392 | size += NFA_SPACE(data_len); | 392 | size += NFA_SPACE(data_len); |
393 | break; | 393 | break; |
394 | 394 | ||
395 | default: | 395 | default: |
396 | *errp = -EINVAL; | 396 | *errp = -EINVAL; |
397 | spin_unlock_bh(&queue->lock); | 397 | spin_unlock_bh(&queue->lock); |
@@ -403,9 +403,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
403 | skb = alloc_skb(size, GFP_ATOMIC); | 403 | skb = alloc_skb(size, GFP_ATOMIC); |
404 | if (!skb) | 404 | if (!skb) |
405 | goto nlmsg_failure; | 405 | goto nlmsg_failure; |
406 | 406 | ||
407 | old_tail= skb->tail; | 407 | old_tail= skb->tail; |
408 | nlh = NLMSG_PUT(skb, 0, 0, | 408 | nlh = NLMSG_PUT(skb, 0, 0, |
409 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, | 409 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, |
410 | sizeof(struct nfgenmsg)); | 410 | sizeof(struct nfgenmsg)); |
411 | nfmsg = NLMSG_DATA(nlh); | 411 | nfmsg = NLMSG_DATA(nlh); |
@@ -427,9 +427,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
427 | #else | 427 | #else |
428 | if (entinf->pf == PF_BRIDGE) { | 428 | if (entinf->pf == PF_BRIDGE) { |
429 | /* Case 1: indev is physical input device, we need to | 429 | /* Case 1: indev is physical input device, we need to |
430 | * look for bridge group (when called from | 430 | * look for bridge group (when called from |
431 | * netfilter_bridge) */ | 431 | * netfilter_bridge) */ |
432 | NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), | 432 | NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), |
433 | &tmp_uint); | 433 | &tmp_uint); |
434 | /* this is the bridge group "brX" */ | 434 | /* this is the bridge group "brX" */ |
435 | tmp_uint = htonl(indev->br_port->br->dev->ifindex); | 435 | tmp_uint = htonl(indev->br_port->br->dev->ifindex); |
@@ -457,7 +457,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
457 | #else | 457 | #else |
458 | if (entinf->pf == PF_BRIDGE) { | 458 | if (entinf->pf == PF_BRIDGE) { |
459 | /* Case 1: outdev is physical output device, we need to | 459 | /* Case 1: outdev is physical output device, we need to |
460 | * look for bridge group (when called from | 460 | * look for bridge group (when called from |
461 | * netfilter_bridge) */ | 461 | * netfilter_bridge) */ |
462 | NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), | 462 | NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), |
463 | &tmp_uint); | 463 | &tmp_uint); |
@@ -490,7 +490,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
490 | struct nfqnl_msg_packet_hw phw; | 490 | struct nfqnl_msg_packet_hw phw; |
491 | 491 | ||
492 | int len = entskb->dev->hard_header_parse(entskb, | 492 | int len = entskb->dev->hard_header_parse(entskb, |
493 | phw.hw_addr); | 493 | phw.hw_addr); |
494 | phw.hw_addrlen = htons(len); | 494 | phw.hw_addrlen = htons(len); |
495 | NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); | 495 | NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); |
496 | } | 496 | } |
@@ -520,7 +520,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
520 | if (skb_copy_bits(entskb, 0, NFA_DATA(nfa), data_len)) | 520 | if (skb_copy_bits(entskb, 0, NFA_DATA(nfa), data_len)) |
521 | BUG(); | 521 | BUG(); |
522 | } | 522 | } |
523 | 523 | ||
524 | nlh->nlmsg_len = skb->tail - old_tail; | 524 | nlh->nlmsg_len = skb->tail - old_tail; |
525 | return skb; | 525 | return skb; |
526 | 526 | ||
@@ -535,7 +535,7 @@ nfattr_failure: | |||
535 | } | 535 | } |
536 | 536 | ||
537 | static int | 537 | static int |
538 | nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | 538 | nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, |
539 | unsigned int queuenum, void *data) | 539 | unsigned int queuenum, void *data) |
540 | { | 540 | { |
541 | int status = -EINVAL; | 541 | int status = -EINVAL; |
@@ -560,7 +560,7 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | |||
560 | entry = kmalloc(sizeof(*entry), GFP_ATOMIC); | 560 | entry = kmalloc(sizeof(*entry), GFP_ATOMIC); |
561 | if (entry == NULL) { | 561 | if (entry == NULL) { |
562 | if (net_ratelimit()) | 562 | if (net_ratelimit()) |
563 | printk(KERN_ERR | 563 | printk(KERN_ERR |
564 | "nf_queue: OOM in nfqnl_enqueue_packet()\n"); | 564 | "nf_queue: OOM in nfqnl_enqueue_packet()\n"); |
565 | status = -ENOMEM; | 565 | status = -ENOMEM; |
566 | goto err_out_put; | 566 | goto err_out_put; |
@@ -573,18 +573,18 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | |||
573 | nskb = nfqnl_build_packet_message(queue, entry, &status); | 573 | nskb = nfqnl_build_packet_message(queue, entry, &status); |
574 | if (nskb == NULL) | 574 | if (nskb == NULL) |
575 | goto err_out_free; | 575 | goto err_out_free; |
576 | 576 | ||
577 | spin_lock_bh(&queue->lock); | 577 | spin_lock_bh(&queue->lock); |
578 | 578 | ||
579 | if (!queue->peer_pid) | 579 | if (!queue->peer_pid) |
580 | goto err_out_free_nskb; | 580 | goto err_out_free_nskb; |
581 | 581 | ||
582 | if (queue->queue_total >= queue->queue_maxlen) { | 582 | if (queue->queue_total >= queue->queue_maxlen) { |
583 | queue->queue_dropped++; | 583 | queue->queue_dropped++; |
584 | status = -ENOSPC; | 584 | status = -ENOSPC; |
585 | if (net_ratelimit()) | 585 | if (net_ratelimit()) |
586 | printk(KERN_WARNING "nf_queue: full at %d entries, " | 586 | printk(KERN_WARNING "nf_queue: full at %d entries, " |
587 | "dropping packet(s). Dropped: %d\n", | 587 | "dropping packet(s). Dropped: %d\n", |
588 | queue->queue_total, queue->queue_dropped); | 588 | queue->queue_total, queue->queue_dropped); |
589 | goto err_out_free_nskb; | 589 | goto err_out_free_nskb; |
590 | } | 590 | } |
@@ -592,7 +592,7 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | |||
592 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ | 592 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
593 | status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); | 593 | status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); |
594 | if (status < 0) { | 594 | if (status < 0) { |
595 | queue->queue_user_dropped++; | 595 | queue->queue_user_dropped++; |
596 | goto err_out_unlock; | 596 | goto err_out_unlock; |
597 | } | 597 | } |
598 | 598 | ||
@@ -603,8 +603,8 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | |||
603 | return status; | 603 | return status; |
604 | 604 | ||
605 | err_out_free_nskb: | 605 | err_out_free_nskb: |
606 | kfree_skb(nskb); | 606 | kfree_skb(nskb); |
607 | 607 | ||
608 | err_out_unlock: | 608 | err_out_unlock: |
609 | spin_unlock_bh(&queue->lock); | 609 | spin_unlock_bh(&queue->lock); |
610 | 610 | ||
@@ -629,11 +629,11 @@ nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e) | |||
629 | return -EINVAL; | 629 | return -EINVAL; |
630 | if (diff > skb_tailroom(e->skb)) { | 630 | if (diff > skb_tailroom(e->skb)) { |
631 | struct sk_buff *newskb; | 631 | struct sk_buff *newskb; |
632 | 632 | ||
633 | newskb = skb_copy_expand(e->skb, | 633 | newskb = skb_copy_expand(e->skb, |
634 | skb_headroom(e->skb), | 634 | skb_headroom(e->skb), |
635 | diff, | 635 | diff, |
636 | GFP_ATOMIC); | 636 | GFP_ATOMIC); |
637 | if (newskb == NULL) { | 637 | if (newskb == NULL) { |
638 | printk(KERN_WARNING "nf_queue: OOM " | 638 | printk(KERN_WARNING "nf_queue: OOM " |
639 | "in mangle, dropping packet\n"); | 639 | "in mangle, dropping packet\n"); |
@@ -676,7 +676,7 @@ static int | |||
676 | dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex) | 676 | dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex) |
677 | { | 677 | { |
678 | struct nf_info *entinf = entry->info; | 678 | struct nf_info *entinf = entry->info; |
679 | 679 | ||
680 | if (entinf->indev) | 680 | if (entinf->indev) |
681 | if (entinf->indev->ifindex == ifindex) | 681 | if (entinf->indev->ifindex == ifindex) |
682 | return 1; | 682 | return 1; |
@@ -702,7 +702,7 @@ static void | |||
702 | nfqnl_dev_drop(int ifindex) | 702 | nfqnl_dev_drop(int ifindex) |
703 | { | 703 | { |
704 | int i; | 704 | int i; |
705 | 705 | ||
706 | QDEBUG("entering for ifindex %u\n", ifindex); | 706 | QDEBUG("entering for ifindex %u\n", ifindex); |
707 | 707 | ||
708 | /* this only looks like we have to hold the readlock for a way too long | 708 | /* this only looks like we have to hold the readlock for a way too long |
@@ -717,7 +717,7 @@ nfqnl_dev_drop(int ifindex) | |||
717 | 717 | ||
718 | hlist_for_each_entry(inst, tmp, head, hlist) { | 718 | hlist_for_each_entry(inst, tmp, head, hlist) { |
719 | struct nfqnl_queue_entry *entry; | 719 | struct nfqnl_queue_entry *entry; |
720 | while ((entry = find_dequeue_entry(inst, dev_cmp, | 720 | while ((entry = find_dequeue_entry(inst, dev_cmp, |
721 | ifindex)) != NULL) | 721 | ifindex)) != NULL) |
722 | issue_verdict(entry, NF_DROP); | 722 | issue_verdict(entry, NF_DROP); |
723 | } | 723 | } |
@@ -835,8 +835,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | |||
835 | 835 | ||
836 | if (nfqa[NFQA_MARK-1]) | 836 | if (nfqa[NFQA_MARK-1]) |
837 | entry->skb->mark = ntohl(*(__be32 *) | 837 | entry->skb->mark = ntohl(*(__be32 *) |
838 | NFA_DATA(nfqa[NFQA_MARK-1])); | 838 | NFA_DATA(nfqa[NFQA_MARK-1])); |
839 | 839 | ||
840 | issue_verdict(entry, verdict); | 840 | issue_verdict(entry, verdict); |
841 | instance_put(queue); | 841 | instance_put(queue); |
842 | return 0; | 842 | return 0; |
@@ -1093,7 +1093,7 @@ static int __init nfnetlink_queue_init(void) | |||
1093 | #ifdef CONFIG_PROC_FS | 1093 | #ifdef CONFIG_PROC_FS |
1094 | struct proc_dir_entry *proc_nfqueue; | 1094 | struct proc_dir_entry *proc_nfqueue; |
1095 | #endif | 1095 | #endif |
1096 | 1096 | ||
1097 | for (i = 0; i < INSTANCE_BUCKETS; i++) | 1097 | for (i = 0; i < INSTANCE_BUCKETS; i++) |
1098 | INIT_HLIST_HEAD(&instance_table[i]); | 1098 | INIT_HLIST_HEAD(&instance_table[i]); |
1099 | 1099 | ||
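For orientation, nfqnl_enqueue_packet() above is the outfn that this module hands to the netfilter core; a rough sketch of that binding follows (handler field names assumed from the outfn signature shown earlier, and the real module performs the registration from its netlink configuration path rather than unconditionally at init time):

    /* Sketch: binding the enqueue callback to a protocol family. */
    static struct nf_queue_handler my_qh = {
            .name  = "nf_queue",
            .outfn = &nfqnl_enqueue_packet,
    };

    static int bind_queue_handler(int pf)
    {
            /* -EBUSY if another handler already owns this family,
             * -EEXIST if this same handler is registered again
             * (see the comment in nf_queue.c earlier in this patch). */
            return nf_register_queue_handler(pf, &my_qh);
    }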
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 134cc88f8c83..ec607a421a5a 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -305,7 +305,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target, | |||
305 | EXPORT_SYMBOL_GPL(xt_find_revision); | 305 | EXPORT_SYMBOL_GPL(xt_find_revision); |
306 | 306 | ||
307 | int xt_check_match(const struct xt_match *match, unsigned short family, | 307 | int xt_check_match(const struct xt_match *match, unsigned short family, |
308 | unsigned int size, const char *table, unsigned int hook_mask, | 308 | unsigned int size, const char *table, unsigned int hook_mask, |
309 | unsigned short proto, int inv_proto) | 309 | unsigned short proto, int inv_proto) |
310 | { | 310 | { |
311 | if (XT_ALIGN(match->matchsize) != size) { | 311 | if (XT_ALIGN(match->matchsize) != size) { |
@@ -377,7 +377,7 @@ int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, | |||
377 | 377 | ||
378 | if (copy_to_user(cm, m, sizeof(*cm)) || | 378 | if (copy_to_user(cm, m, sizeof(*cm)) || |
379 | put_user(msize, &cm->u.user.match_size)) | 379 | put_user(msize, &cm->u.user.match_size)) |
380 | return -EFAULT; | 380 | return -EFAULT; |
381 | 381 | ||
382 | if (match->compat_to_user) { | 382 | if (match->compat_to_user) { |
383 | if (match->compat_to_user((void __user *)cm->data, m->data)) | 383 | if (match->compat_to_user((void __user *)cm->data, m->data)) |
@@ -432,7 +432,7 @@ int xt_compat_target_offset(struct xt_target *target) | |||
432 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); | 432 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); |
433 | 433 | ||
434 | void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, | 434 | void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, |
435 | int *size) | 435 | int *size) |
436 | { | 436 | { |
437 | struct xt_target *target = t->u.kernel.target; | 437 | struct xt_target *target = t->u.kernel.target; |
438 | struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; | 438 | struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; |
@@ -467,7 +467,7 @@ int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, | |||
467 | 467 | ||
468 | if (copy_to_user(ct, t, sizeof(*ct)) || | 468 | if (copy_to_user(ct, t, sizeof(*ct)) || |
469 | put_user(tsize, &ct->u.user.target_size)) | 469 | put_user(tsize, &ct->u.user.target_size)) |
470 | return -EFAULT; | 470 | return -EFAULT; |
471 | 471 | ||
472 | if (target->compat_to_user) { | 472 | if (target->compat_to_user) { |
473 | if (target->compat_to_user((void __user *)ct->data, t->data)) | 473 | if (target->compat_to_user((void __user *)ct->data, t->data)) |
@@ -710,7 +710,7 @@ static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos) | |||
710 | 710 | ||
711 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) | 711 | if (mutex_lock_interruptible(&xt[af].mutex) != 0) |
712 | return NULL; | 712 | return NULL; |
713 | 713 | ||
714 | return xt_get_idx(list, seq, *pos); | 714 | return xt_get_idx(list, seq, *pos); |
715 | } | 715 | } |
716 | 716 | ||
@@ -723,7 +723,7 @@ static void *xt_tgt_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
723 | 723 | ||
724 | if (af >= NPROTO) | 724 | if (af >= NPROTO) |
725 | return NULL; | 725 | return NULL; |
726 | 726 | ||
727 | list = type2list(af, type); | 727 | list = type2list(af, type); |
728 | if (!list) | 728 | if (!list) |
729 | return NULL; | 729 | return NULL; |
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c index 195e92990da7..30884833e665 100644 --- a/net/netfilter/xt_CLASSIFY.c +++ b/net/netfilter/xt_CLASSIFY.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
16 | #include <net/checksum.h> | 16 | #include <net/checksum.h> |
17 | 17 | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | #include <linux/netfilter_ipv6.h> | ||
18 | #include <linux/netfilter/x_tables.h> | 20 | #include <linux/netfilter/x_tables.h> |
19 | #include <linux/netfilter/xt_CLASSIFY.h> | 21 | #include <linux/netfilter/xt_CLASSIFY.h> |
20 | 22 | ||
@@ -46,7 +48,7 @@ static struct xt_target xt_classify_target[] = { | |||
46 | .table = "mangle", | 48 | .table = "mangle", |
47 | .hooks = (1 << NF_IP_LOCAL_OUT) | | 49 | .hooks = (1 << NF_IP_LOCAL_OUT) | |
48 | (1 << NF_IP_FORWARD) | | 50 | (1 << NF_IP_FORWARD) | |
49 | (1 << NF_IP_POST_ROUTING), | 51 | (1 << NF_IP_POST_ROUTING), |
50 | .me = THIS_MODULE, | 52 | .me = THIS_MODULE, |
51 | }, | 53 | }, |
52 | { | 54 | { |
@@ -55,9 +57,9 @@ static struct xt_target xt_classify_target[] = { | |||
55 | .target = target, | 57 | .target = target, |
56 | .targetsize = sizeof(struct xt_classify_target_info), | 58 | .targetsize = sizeof(struct xt_classify_target_info), |
57 | .table = "mangle", | 59 | .table = "mangle", |
58 | .hooks = (1 << NF_IP_LOCAL_OUT) | | 60 | .hooks = (1 << NF_IP6_LOCAL_OUT) | |
59 | (1 << NF_IP_FORWARD) | | 61 | (1 << NF_IP6_FORWARD) | |
60 | (1 << NF_IP_POST_ROUTING), | 62 | (1 << NF_IP6_POST_ROUTING), |
61 | .me = THIS_MODULE, | 63 | .me = THIS_MODULE, |
62 | }, | 64 | }, |
63 | }; | 65 | }; |
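The xt_CLASSIFY change above is a correctness fix: the IPv6 target entry had been registered with IPv4 hook constants, without including the IPv6 header. It presumably kept working only because the two hook enumerations use the same numeric values, roughly:

    /* For comparison -- hook numbers as defined in the two headers now
     * included; the IPv4 and IPv6 values coincide, so the old mask was
     * numerically identical to the corrected one:
     *
     *   NF_IP_FORWARD      == NF_IP6_FORWARD      == 2
     *   NF_IP_LOCAL_OUT    == NF_IP6_LOCAL_OUT    == 3
     *   NF_IP_POST_ROUTING == NF_IP6_POST_ROUTING == 4
     */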
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c index cfc45af357d5..43817808d865 100644 --- a/net/netfilter/xt_MARK.c +++ b/net/netfilter/xt_MARK.c | |||
@@ -50,11 +50,11 @@ target_v1(struct sk_buff **pskb, | |||
50 | case XT_MARK_SET: | 50 | case XT_MARK_SET: |
51 | mark = markinfo->mark; | 51 | mark = markinfo->mark; |
52 | break; | 52 | break; |
53 | 53 | ||
54 | case XT_MARK_AND: | 54 | case XT_MARK_AND: |
55 | mark = (*pskb)->mark & markinfo->mark; | 55 | mark = (*pskb)->mark & markinfo->mark; |
56 | break; | 56 | break; |
57 | 57 | ||
58 | case XT_MARK_OR: | 58 | case XT_MARK_OR: |
59 | mark = (*pskb)->mark | markinfo->mark; | 59 | mark = (*pskb)->mark | markinfo->mark; |
60 | break; | 60 | break; |
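The three XT_MARK_* cases above combine the configured value with the packet mark. A small worked example (values invented for illustration), taking skb->mark = 0x3 and markinfo->mark = 0x5:

    /* XT_MARK_SET: mark = 0x5             (configured value replaces the mark)
     * XT_MARK_AND: mark = 0x3 & 0x5 = 0x1
     * XT_MARK_OR:  mark = 0x3 | 0x5 = 0x7 */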
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c index 39e117502bd7..201155b316e0 100644 --- a/net/netfilter/xt_NFQUEUE.c +++ b/net/netfilter/xt_NFQUEUE.c | |||
@@ -3,9 +3,9 @@ | |||
3 | * (C) 2005 by Harald Welte <laforge@netfilter.org> | 3 | * (C) 2005 by Harald Welte <laforge@netfilter.org> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c index 6d00dcaed238..b874a2008b2b 100644 --- a/net/netfilter/xt_NOTRACK.c +++ b/net/netfilter/xt_NOTRACK.c | |||
@@ -22,8 +22,8 @@ target(struct sk_buff **pskb, | |||
22 | if ((*pskb)->nfct != NULL) | 22 | if ((*pskb)->nfct != NULL) |
23 | return XT_CONTINUE; | 23 | return XT_CONTINUE; |
24 | 24 | ||
25 | /* Attach fake conntrack entry. | 25 | /* Attach fake conntrack entry. |
26 | If there is a real ct entry corresponding to this packet, | 26 | If there is a real ct entry corresponding to this packet, |
27 | it'll hang around till timing out. We don't deal with it | 27 | it'll hang around till timing out. We don't deal with it |
28 | for performance reasons. JK */ | 28 | for performance reasons. JK */ |
29 | nf_ct_untrack(*pskb); | 29 | nf_ct_untrack(*pskb); |
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index f1131c3a9db5..705f0e830a79 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c | |||
@@ -55,7 +55,7 @@ static int checkentry_selinux(struct xt_secmark_target_info *info) | |||
55 | { | 55 | { |
56 | int err; | 56 | int err; |
57 | struct xt_secmark_target_selinux_info *sel = &info->u.sel; | 57 | struct xt_secmark_target_selinux_info *sel = &info->u.sel; |
58 | 58 | ||
59 | sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; | 59 | sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; |
60 | 60 | ||
61 | err = selinux_string_to_sid(sel->selctx, &sel->selsid); | 61 | err = selinux_string_to_sid(sel->selctx, &sel->selsid); |
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 3dc2357b8de8..2885c378288e 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -51,10 +51,10 @@ match(const struct sk_buff *skb, | |||
51 | if (ct == &ip_conntrack_untracked) | 51 | if (ct == &ip_conntrack_untracked) |
52 | statebit = XT_CONNTRACK_STATE_UNTRACKED; | 52 | statebit = XT_CONNTRACK_STATE_UNTRACKED; |
53 | else if (ct) | 53 | else if (ct) |
54 | statebit = XT_CONNTRACK_STATE_BIT(ctinfo); | 54 | statebit = XT_CONNTRACK_STATE_BIT(ctinfo); |
55 | else | 55 | else |
56 | statebit = XT_CONNTRACK_STATE_INVALID; | 56 | statebit = XT_CONNTRACK_STATE_INVALID; |
57 | 57 | ||
58 | if (sinfo->flags & XT_CONNTRACK_STATE) { | 58 | if (sinfo->flags & XT_CONNTRACK_STATE) { |
59 | if (ct) { | 59 | if (ct) { |
60 | if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) | 60 | if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) |
@@ -77,7 +77,7 @@ match(const struct sk_buff *skb, | |||
77 | FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != | 77 | FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != |
78 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, | 78 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, |
79 | XT_CONNTRACK_PROTO)) | 79 | XT_CONNTRACK_PROTO)) |
80 | return 0; | 80 | return 0; |
81 | 81 | ||
82 | if (sinfo->flags & XT_CONNTRACK_ORIGSRC && | 82 | if (sinfo->flags & XT_CONNTRACK_ORIGSRC && |
83 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip & | 83 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip & |
@@ -147,10 +147,10 @@ match(const struct sk_buff *skb, | |||
147 | if (ct == &nf_conntrack_untracked) | 147 | if (ct == &nf_conntrack_untracked) |
148 | statebit = XT_CONNTRACK_STATE_UNTRACKED; | 148 | statebit = XT_CONNTRACK_STATE_UNTRACKED; |
149 | else if (ct) | 149 | else if (ct) |
150 | statebit = XT_CONNTRACK_STATE_BIT(ctinfo); | 150 | statebit = XT_CONNTRACK_STATE_BIT(ctinfo); |
151 | else | 151 | else |
152 | statebit = XT_CONNTRACK_STATE_INVALID; | 152 | statebit = XT_CONNTRACK_STATE_INVALID; |
153 | 153 | ||
154 | if (sinfo->flags & XT_CONNTRACK_STATE) { | 154 | if (sinfo->flags & XT_CONNTRACK_STATE) { |
155 | if (ct) { | 155 | if (ct) { |
156 | if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) | 156 | if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) |
@@ -171,41 +171,41 @@ match(const struct sk_buff *skb, | |||
171 | 171 | ||
172 | if (sinfo->flags & XT_CONNTRACK_PROTO && | 172 | if (sinfo->flags & XT_CONNTRACK_PROTO && |
173 | FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != | 173 | FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != |
174 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, | 174 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, |
175 | XT_CONNTRACK_PROTO)) | 175 | XT_CONNTRACK_PROTO)) |
176 | return 0; | 176 | return 0; |
177 | 177 | ||
178 | if (sinfo->flags & XT_CONNTRACK_ORIGSRC && | 178 | if (sinfo->flags & XT_CONNTRACK_ORIGSRC && |
179 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip & | 179 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip & |
180 | sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != | 180 | sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != |
181 | sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, | 181 | sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, |
182 | XT_CONNTRACK_ORIGSRC)) | 182 | XT_CONNTRACK_ORIGSRC)) |
183 | return 0; | 183 | return 0; |
184 | 184 | ||
185 | if (sinfo->flags & XT_CONNTRACK_ORIGDST && | 185 | if (sinfo->flags & XT_CONNTRACK_ORIGDST && |
186 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip & | 186 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip & |
187 | sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != | 187 | sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != |
188 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, | 188 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, |
189 | XT_CONNTRACK_ORIGDST)) | 189 | XT_CONNTRACK_ORIGDST)) |
190 | return 0; | 190 | return 0; |
191 | 191 | ||
192 | if (sinfo->flags & XT_CONNTRACK_REPLSRC && | 192 | if (sinfo->flags & XT_CONNTRACK_REPLSRC && |
193 | FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip & | 193 | FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip & |
194 | sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != | 194 | sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != |
195 | sinfo->tuple[IP_CT_DIR_REPLY].src.ip, | 195 | sinfo->tuple[IP_CT_DIR_REPLY].src.ip, |
196 | XT_CONNTRACK_REPLSRC)) | 196 | XT_CONNTRACK_REPLSRC)) |
197 | return 0; | 197 | return 0; |
198 | 198 | ||
199 | if (sinfo->flags & XT_CONNTRACK_REPLDST && | 199 | if (sinfo->flags & XT_CONNTRACK_REPLDST && |
200 | FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip & | 200 | FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip & |
201 | sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != | 201 | sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != |
202 | sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, | 202 | sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, |
203 | XT_CONNTRACK_REPLDST)) | 203 | XT_CONNTRACK_REPLDST)) |
204 | return 0; | 204 | return 0; |
205 | 205 | ||
206 | if (sinfo->flags & XT_CONNTRACK_STATUS && | 206 | if (sinfo->flags & XT_CONNTRACK_STATUS && |
207 | FWINV((ct->status & sinfo->statusmask) == 0, | 207 | FWINV((ct->status & sinfo->statusmask) == 0, |
208 | XT_CONNTRACK_STATUS)) | 208 | XT_CONNTRACK_STATUS)) |
209 | return 0; | 209 | return 0; |
210 | 210 | ||
211 | if(sinfo->flags & XT_CONNTRACK_EXPIRES) { | 211 | if(sinfo->flags & XT_CONNTRACK_EXPIRES) { |
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c index 3e6cf430e518..2c9c0dee8aaf 100644 --- a/net/netfilter/xt_dccp.c +++ b/net/netfilter/xt_dccp.c | |||
@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("Match for DCCP protocol packets"); | |||
26 | MODULE_ALIAS("ipt_dccp"); | 26 | MODULE_ALIAS("ipt_dccp"); |
27 | 27 | ||
28 | #define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ | 28 | #define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ |
29 | || (!!((invflag) & (option)) ^ (cond))) | 29 | || (!!((invflag) & (option)) ^ (cond))) |
30 | 30 | ||
31 | static unsigned char *dccp_optbuf; | 31 | static unsigned char *dccp_optbuf; |
32 | static DEFINE_SPINLOCK(dccp_buflock); | 32 | static DEFINE_SPINLOCK(dccp_buflock); |
@@ -67,9 +67,9 @@ dccp_find_option(u_int8_t option, | |||
67 | return 1; | 67 | return 1; |
68 | } | 68 | } |
69 | 69 | ||
70 | if (op[i] < 2) | 70 | if (op[i] < 2) |
71 | i++; | 71 | i++; |
72 | else | 72 | else |
73 | i += op[i+1]?:1; | 73 | i += op[i+1]?:1; |
74 | } | 74 | } |
75 | 75 | ||
@@ -106,18 +106,18 @@ match(const struct sk_buff *skb, | |||
106 | 106 | ||
107 | if (offset) | 107 | if (offset) |
108 | return 0; | 108 | return 0; |
109 | 109 | ||
110 | dh = skb_header_pointer(skb, protoff, sizeof(_dh), &_dh); | 110 | dh = skb_header_pointer(skb, protoff, sizeof(_dh), &_dh); |
111 | if (dh == NULL) { | 111 | if (dh == NULL) { |
112 | *hotdrop = 1; | 112 | *hotdrop = 1; |
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
116 | return DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0]) | 116 | return DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0]) |
117 | && (ntohs(dh->dccph_sport) <= info->spts[1])), | 117 | && (ntohs(dh->dccph_sport) <= info->spts[1])), |
118 | XT_DCCP_SRC_PORTS, info->flags, info->invflags) | 118 | XT_DCCP_SRC_PORTS, info->flags, info->invflags) |
119 | && DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0]) | 119 | && DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0]) |
120 | && (ntohs(dh->dccph_dport) <= info->dpts[1])), | 120 | && (ntohs(dh->dccph_dport) <= info->dpts[1])), |
121 | XT_DCCP_DEST_PORTS, info->flags, info->invflags) | 121 | XT_DCCP_DEST_PORTS, info->flags, info->invflags) |
122 | && DCCHECK(match_types(dh, info->typemask), | 122 | && DCCHECK(match_types(dh, info->typemask), |
123 | XT_DCCP_TYPE, info->flags, info->invflags) | 123 | XT_DCCP_TYPE, info->flags, info->invflags) |
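Each of the checks above goes through DCCHECK(): when the option bit is absent from info->flags the check passes unconditionally; otherwise the condition must hold, inverted if the bit is also set in info->invflags. A rough truth table (illustrative, not part of the patch):

    /* DCCHECK(cond, option, flag, invflag)
     *     == !(flag & option) || (!!(invflag & option) ^ cond)
     *
     * option not in flag                        -> 1   (check skipped)
     * option in flag, not in invflag, cond = 1  -> 1
     * option in flag, not in invflag, cond = 0  -> 0
     * option in flag and in invflag,  cond = 1  -> 0   (inverted)
     * option in flag and in invflag,  cond = 0  -> 1   (inverted)
     */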
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 269a1e793478..9f37d593ca38 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -208,7 +208,7 @@ static int htable_create(struct xt_hashlimit_info *minfo, int family) | |||
208 | spin_lock_init(&hinfo->lock); | 208 | spin_lock_init(&hinfo->lock); |
209 | hinfo->pde = create_proc_entry(minfo->name, 0, | 209 | hinfo->pde = create_proc_entry(minfo->name, 0, |
210 | family == AF_INET ? hashlimit_procdir4 : | 210 | family == AF_INET ? hashlimit_procdir4 : |
211 | hashlimit_procdir6); | 211 | hashlimit_procdir6); |
212 | if (!hinfo->pde) { | 212 | if (!hinfo->pde) { |
213 | vfree(hinfo); | 213 | vfree(hinfo); |
214 | return -1; | 214 | return -1; |
@@ -240,7 +240,7 @@ static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he) | |||
240 | } | 240 | } |
241 | 241 | ||
242 | static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, | 242 | static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, |
243 | int (*select)(struct xt_hashlimit_htable *ht, | 243 | int (*select)(struct xt_hashlimit_htable *ht, |
244 | struct dsthash_ent *he)) | 244 | struct dsthash_ent *he)) |
245 | { | 245 | { |
246 | unsigned int i; | 246 | unsigned int i; |
@@ -279,7 +279,7 @@ static void htable_destroy(struct xt_hashlimit_htable *hinfo) | |||
279 | /* remove proc entry */ | 279 | /* remove proc entry */ |
280 | remove_proc_entry(hinfo->pde->name, | 280 | remove_proc_entry(hinfo->pde->name, |
281 | hinfo->family == AF_INET ? hashlimit_procdir4 : | 281 | hinfo->family == AF_INET ? hashlimit_procdir4 : |
282 | hashlimit_procdir6); | 282 | hashlimit_procdir6); |
283 | htable_selective_cleanup(hinfo, select_all); | 283 | htable_selective_cleanup(hinfo, select_all); |
284 | vfree(hinfo); | 284 | vfree(hinfo); |
285 | } | 285 | } |
@@ -483,7 +483,7 @@ hashlimit_match(const struct sk_buff *skb, | |||
483 | return 1; | 483 | return 1; |
484 | } | 484 | } |
485 | 485 | ||
486 | spin_unlock_bh(&hinfo->lock); | 486 | spin_unlock_bh(&hinfo->lock); |
487 | 487 | ||
488 | /* default case: we're overlimit, thus don't match */ | 488 | /* default case: we're overlimit, thus don't match */ |
489 | return 0; | 489 | return 0; |
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c index 04bc32ba7195..407d1d5da8a1 100644 --- a/net/netfilter/xt_helper.c +++ b/net/netfilter/xt_helper.c | |||
@@ -53,7 +53,7 @@ match(const struct sk_buff *skb, | |||
53 | struct ip_conntrack *ct; | 53 | struct ip_conntrack *ct; |
54 | enum ip_conntrack_info ctinfo; | 54 | enum ip_conntrack_info ctinfo; |
55 | int ret = info->invert; | 55 | int ret = info->invert; |
56 | 56 | ||
57 | ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo); | 57 | ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo); |
58 | if (!ct) { | 58 | if (!ct) { |
59 | DEBUGP("xt_helper: Eek! invalid conntrack?\n"); | 59 | DEBUGP("xt_helper: Eek! invalid conntrack?\n"); |
@@ -67,19 +67,19 @@ match(const struct sk_buff *skb, | |||
67 | 67 | ||
68 | read_lock_bh(&ip_conntrack_lock); | 68 | read_lock_bh(&ip_conntrack_lock); |
69 | if (!ct->master->helper) { | 69 | if (!ct->master->helper) { |
70 | DEBUGP("xt_helper: master ct %p has no helper\n", | 70 | DEBUGP("xt_helper: master ct %p has no helper\n", |
71 | exp->expectant); | 71 | exp->expectant); |
72 | goto out_unlock; | 72 | goto out_unlock; |
73 | } | 73 | } |
74 | 74 | ||
75 | DEBUGP("master's name = %s , info->name = %s\n", | 75 | DEBUGP("master's name = %s , info->name = %s\n", |
76 | ct->master->helper->name, info->name); | 76 | ct->master->helper->name, info->name); |
77 | 77 | ||
78 | if (info->name[0] == '\0') | 78 | if (info->name[0] == '\0') |
79 | ret ^= 1; | 79 | ret ^= 1; |
80 | else | 80 | else |
81 | ret ^= !strncmp(ct->master->helper->name, info->name, | 81 | ret ^= !strncmp(ct->master->helper->name, info->name, |
82 | strlen(ct->master->helper->name)); | 82 | strlen(ct->master->helper->name)); |
83 | out_unlock: | 83 | out_unlock: |
84 | read_unlock_bh(&ip_conntrack_lock); | 84 | read_unlock_bh(&ip_conntrack_lock); |
85 | return ret; | 85 | return ret; |
@@ -102,7 +102,7 @@ match(const struct sk_buff *skb, | |||
102 | struct nf_conn_help *master_help; | 102 | struct nf_conn_help *master_help; |
103 | enum ip_conntrack_info ctinfo; | 103 | enum ip_conntrack_info ctinfo; |
104 | int ret = info->invert; | 104 | int ret = info->invert; |
105 | 105 | ||
106 | ct = nf_ct_get((struct sk_buff *)skb, &ctinfo); | 106 | ct = nf_ct_get((struct sk_buff *)skb, &ctinfo); |
107 | if (!ct) { | 107 | if (!ct) { |
108 | DEBUGP("xt_helper: Eek! invalid conntrack?\n"); | 108 | DEBUGP("xt_helper: Eek! invalid conntrack?\n"); |
@@ -117,19 +117,19 @@ match(const struct sk_buff *skb, | |||
117 | read_lock_bh(&nf_conntrack_lock); | 117 | read_lock_bh(&nf_conntrack_lock); |
118 | master_help = nfct_help(ct->master); | 118 | master_help = nfct_help(ct->master); |
119 | if (!master_help || !master_help->helper) { | 119 | if (!master_help || !master_help->helper) { |
120 | DEBUGP("xt_helper: master ct %p has no helper\n", | 120 | DEBUGP("xt_helper: master ct %p has no helper\n", |
121 | exp->expectant); | 121 | exp->expectant); |
122 | goto out_unlock; | 122 | goto out_unlock; |
123 | } | 123 | } |
124 | 124 | ||
125 | DEBUGP("master's name = %s , info->name = %s\n", | 125 | DEBUGP("master's name = %s , info->name = %s\n", |
126 | ct->master->helper->name, info->name); | 126 | ct->master->helper->name, info->name); |
127 | 127 | ||
128 | if (info->name[0] == '\0') | 128 | if (info->name[0] == '\0') |
129 | ret ^= 1; | 129 | ret ^= 1; |
130 | else | 130 | else |
131 | ret ^= !strncmp(master_help->helper->name, info->name, | 131 | ret ^= !strncmp(master_help->helper->name, info->name, |
132 | strlen(master_help->helper->name)); | 132 | strlen(master_help->helper->name)); |
133 | out_unlock: | 133 | out_unlock: |
134 | read_unlock_bh(&nf_conntrack_lock); | 134 | read_unlock_bh(&nf_conntrack_lock); |
135 | return ret; | 135 | return ret; |
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c index 67fd30d9f303..32fb998d9bac 100644 --- a/net/netfilter/xt_length.c +++ b/net/netfilter/xt_length.c | |||
@@ -32,7 +32,7 @@ match(const struct sk_buff *skb, | |||
32 | { | 32 | { |
33 | const struct xt_length_info *info = matchinfo; | 33 | const struct xt_length_info *info = matchinfo; |
34 | u_int16_t pktlen = ntohs(skb->nh.iph->tot_len); | 34 | u_int16_t pktlen = ntohs(skb->nh.iph->tot_len); |
35 | 35 | ||
36 | return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; | 36 | return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; |
37 | } | 37 | } |
38 | 38 | ||
@@ -48,7 +48,7 @@ match6(const struct sk_buff *skb, | |||
48 | { | 48 | { |
49 | const struct xt_length_info *info = matchinfo; | 49 | const struct xt_length_info *info = matchinfo; |
50 | u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr); | 50 | u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr); |
51 | 51 | ||
52 | return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; | 52 | return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; |
53 | } | 53 | } |
54 | 54 | ||
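In the IPv6 variant, payload_len counts only what follows the fixed 40-byte IPv6 header, so sizeof(struct ipv6hdr) is added to obtain a total length comparable to the IPv4 tot_len. Worked example (numbers chosen for illustration):

    /* payload_len = 20   (payload plus extension headers)
     * pktlen      = 20 + sizeof(struct ipv6hdr) = 20 + 40 = 60 */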
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index fda7b7dec27d..6fd8347c0058 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -89,7 +89,7 @@ ipt_limit_match(const struct sk_buff *skb, | |||
89 | return 1; | 89 | return 1; |
90 | } | 90 | } |
91 | 91 | ||
92 | spin_unlock_bh(&limit_lock); | 92 | spin_unlock_bh(&limit_lock); |
93 | return 0; | 93 | return 0; |
94 | } | 94 | } |
95 | 95 | ||
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c index 425fc21e31f5..d430d90d7b26 100644 --- a/net/netfilter/xt_mac.c +++ b/net/netfilter/xt_mac.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
15 | 15 | ||
16 | #include <linux/netfilter_ipv4.h> | 16 | #include <linux/netfilter_ipv4.h> |
17 | #include <linux/netfilter_ipv6.h> | ||
17 | #include <linux/netfilter/xt_mac.h> | 18 | #include <linux/netfilter/xt_mac.h> |
18 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
19 | 20 | ||
@@ -59,9 +60,9 @@ static struct xt_match xt_mac_match[] = { | |||
59 | .family = AF_INET6, | 60 | .family = AF_INET6, |
60 | .match = match, | 61 | .match = match, |
61 | .matchsize = sizeof(struct xt_mac_info), | 62 | .matchsize = sizeof(struct xt_mac_info), |
62 | .hooks = (1 << NF_IP_PRE_ROUTING) | | 63 | .hooks = (1 << NF_IP6_PRE_ROUTING) | |
63 | (1 << NF_IP_LOCAL_IN) | | 64 | (1 << NF_IP6_LOCAL_IN) | |
64 | (1 << NF_IP_FORWARD), | 65 | (1 << NF_IP6_FORWARD), |
65 | .me = THIS_MODULE, | 66 | .me = THIS_MODULE, |
66 | }, | 67 | }, |
67 | }; | 68 | }; |
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c index dfa1ee6914c0..39911dddb011 100644 --- a/net/netfilter/xt_mark.c +++ b/net/netfilter/xt_mark.c | |||
@@ -36,10 +36,10 @@ match(const struct sk_buff *skb, | |||
36 | 36 | ||
37 | static int | 37 | static int |
38 | checkentry(const char *tablename, | 38 | checkentry(const char *tablename, |
39 | const void *entry, | 39 | const void *entry, |
40 | const struct xt_match *match, | 40 | const struct xt_match *match, |
41 | void *matchinfo, | 41 | void *matchinfo, |
42 | unsigned int hook_mask) | 42 | unsigned int hook_mask) |
43 | { | 43 | { |
44 | const struct xt_mark_info *minfo = matchinfo; | 44 | const struct xt_mark_info *minfo = matchinfo; |
45 | 45 | ||
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c index 1602086c7fd6..4dce2a81702a 100644 --- a/net/netfilter/xt_multiport.c +++ b/net/netfilter/xt_multiport.c | |||
@@ -91,7 +91,7 @@ ports_match_v1(const struct xt_multiport_v1 *minfo, | |||
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | return minfo->invert; | 94 | return minfo->invert; |
95 | } | 95 | } |
96 | 96 | ||
97 | static int | 97 | static int |
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index b9b3ffc5451d..35a0fe200c39 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c | |||
@@ -117,7 +117,7 @@ checkentry(const char *tablename, | |||
117 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || | 117 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || |
118 | info->invert & XT_PHYSDEV_OP_BRIDGED) && | 118 | info->invert & XT_PHYSDEV_OP_BRIDGED) && |
119 | hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | | 119 | hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | |
120 | (1 << NF_IP_POST_ROUTING))) { | 120 | (1 << NF_IP_POST_ROUTING))) { |
121 | printk(KERN_WARNING "physdev match: using --physdev-out in the " | 121 | printk(KERN_WARNING "physdev match: using --physdev-out in the " |
122 | "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " | 122 | "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " |
123 | "traffic is not supported anymore.\n"); | 123 | "traffic is not supported anymore.\n"); |
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c index 46bde2b1e1e0..15b45a95ec13 100644 --- a/net/netfilter/xt_policy.c +++ b/net/netfilter/xt_policy.c | |||
@@ -109,13 +109,13 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info, | |||
109 | } | 109 | } |
110 | 110 | ||
111 | static int match(const struct sk_buff *skb, | 111 | static int match(const struct sk_buff *skb, |
112 | const struct net_device *in, | 112 | const struct net_device *in, |
113 | const struct net_device *out, | 113 | const struct net_device *out, |
114 | const struct xt_match *match, | 114 | const struct xt_match *match, |
115 | const void *matchinfo, | 115 | const void *matchinfo, |
116 | int offset, | 116 | int offset, |
117 | unsigned int protoff, | 117 | unsigned int protoff, |
118 | int *hotdrop) | 118 | int *hotdrop) |
119 | { | 119 | { |
120 | const struct xt_policy_info *info = matchinfo; | 120 | const struct xt_policy_info *info = matchinfo; |
121 | int ret; | 121 | int ret; |
@@ -134,27 +134,27 @@ static int match(const struct sk_buff *skb, | |||
134 | } | 134 | } |
135 | 135 | ||
136 | static int checkentry(const char *tablename, const void *ip_void, | 136 | static int checkentry(const char *tablename, const void *ip_void, |
137 | const struct xt_match *match, | 137 | const struct xt_match *match, |
138 | void *matchinfo, unsigned int hook_mask) | 138 | void *matchinfo, unsigned int hook_mask) |
139 | { | 139 | { |
140 | struct xt_policy_info *info = matchinfo; | 140 | struct xt_policy_info *info = matchinfo; |
141 | 141 | ||
142 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { | 142 | if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { |
143 | printk(KERN_ERR "xt_policy: neither incoming nor " | 143 | printk(KERN_ERR "xt_policy: neither incoming nor " |
144 | "outgoing policy selected\n"); | 144 | "outgoing policy selected\n"); |
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | /* hook values are equal for IPv4 and IPv6 */ | 147 | /* hook values are equal for IPv4 and IPv6 */ |
148 | if (hook_mask & (1 << NF_IP_PRE_ROUTING | 1 << NF_IP_LOCAL_IN) | 148 | if (hook_mask & (1 << NF_IP_PRE_ROUTING | 1 << NF_IP_LOCAL_IN) |
149 | && info->flags & XT_POLICY_MATCH_OUT) { | 149 | && info->flags & XT_POLICY_MATCH_OUT) { |
150 | printk(KERN_ERR "xt_policy: output policy not valid in " | 150 | printk(KERN_ERR "xt_policy: output policy not valid in " |
151 | "PRE_ROUTING and INPUT\n"); | 151 | "PRE_ROUTING and INPUT\n"); |
152 | return 0; | 152 | return 0; |
153 | } | 153 | } |
154 | if (hook_mask & (1 << NF_IP_POST_ROUTING | 1 << NF_IP_LOCAL_OUT) | 154 | if (hook_mask & (1 << NF_IP_POST_ROUTING | 1 << NF_IP_LOCAL_OUT) |
155 | && info->flags & XT_POLICY_MATCH_IN) { | 155 | && info->flags & XT_POLICY_MATCH_IN) { |
156 | printk(KERN_ERR "xt_policy: input policy not valid in " | 156 | printk(KERN_ERR "xt_policy: input policy not valid in " |
157 | "POST_ROUTING and OUTPUT\n"); | 157 | "POST_ROUTING and OUTPUT\n"); |
158 | return 0; | 158 | return 0; |
159 | } | 159 | } |
160 | if (info->len > XT_POLICY_MAX_ELEM) { | 160 | if (info->len > XT_POLICY_MAX_ELEM) { |
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index b75fa2c70e66..bfdde06ca0b7 100644 --- a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c | |||
@@ -30,8 +30,8 @@ match(const struct sk_buff *skb, | |||
30 | q->quota -= skb->len; | 30 | q->quota -= skb->len; |
31 | ret ^= 1; | 31 | ret ^= 1; |
32 | } else { | 32 | } else { |
33 | /* we do not allow even small packets from now on */ | 33 | /* we do not allow even small packets from now on */ |
34 | q->quota = 0; | 34 | q->quota = 0; |
35 | } | 35 | } |
36 | spin_unlock_bh(&quota_lock); | 36 | spin_unlock_bh(&quota_lock); |
37 | 37 | ||
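The branch above runs under quota_lock: while the remaining quota still covers the packet it is decremented and the match result is flipped; once it no longer does, the quota is zeroed so that even smaller packets stop matching. A condensed sketch of the accounting step, with the enclosing condition reconstructed for context (that if () line lies outside the hunk shown and is an assumption here):

    /* sketch -- the if () condition is assumed, not part of the hunk above */
    spin_lock_bh(&quota_lock);
    if (q->quota >= skb->len) {
            q->quota -= skb->len;   /* enough quota left: consume it and match */
            ret ^= 1;
    } else {
            /* we do not allow even small packets from now on */
            q->quota = 0;
    }
    spin_unlock_bh(&quota_lock);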
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c index a80b7d132b65..97ffc2fbc19d 100644 --- a/net/netfilter/xt_realm.c +++ b/net/netfilter/xt_realm.c | |||
@@ -35,7 +35,7 @@ match(const struct sk_buff *skb, | |||
35 | { | 35 | { |
36 | const struct xt_realm_info *info = matchinfo; | 36 | const struct xt_realm_info *info = matchinfo; |
37 | struct dst_entry *dst = skb->dst; | 37 | struct dst_entry *dst = skb->dst; |
38 | 38 | ||
39 | return (info->id == (dst->tclassid & info->mask)) ^ info->invert; | 39 | return (info->id == (dst->tclassid & info->mask)) ^ info->invert; |
40 | } | 40 | } |
41 | 41 | ||
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index 71bf036f833c..f86d8d769d47 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c | |||
@@ -66,9 +66,9 @@ match_packet(const struct sk_buff *skb, | |||
66 | duprintf("Dropping invalid SCTP packet.\n"); | 66 | duprintf("Dropping invalid SCTP packet.\n"); |
67 | *hotdrop = 1; | 67 | *hotdrop = 1; |
68 | return 0; | 68 | return 0; |
69 | } | 69 | } |
70 | 70 | ||
71 | duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", | 71 | duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", |
72 | ++i, offset, sch->type, htons(sch->length), sch->flags); | 72 | ++i, offset, sch->type, htons(sch->length), sch->flags); |
73 | 73 | ||
74 | offset += (ntohs(sch->length) + 3) & ~3; | 74 | offset += (ntohs(sch->length) + 3) & ~3; |
@@ -78,21 +78,21 @@ match_packet(const struct sk_buff *skb, | |||
78 | if (SCTP_CHUNKMAP_IS_SET(chunkmap, sch->type)) { | 78 | if (SCTP_CHUNKMAP_IS_SET(chunkmap, sch->type)) { |
79 | switch (chunk_match_type) { | 79 | switch (chunk_match_type) { |
80 | case SCTP_CHUNK_MATCH_ANY: | 80 | case SCTP_CHUNK_MATCH_ANY: |
81 | if (match_flags(flag_info, flag_count, | 81 | if (match_flags(flag_info, flag_count, |
82 | sch->type, sch->flags)) { | 82 | sch->type, sch->flags)) { |
83 | return 1; | 83 | return 1; |
84 | } | 84 | } |
85 | break; | 85 | break; |
86 | 86 | ||
87 | case SCTP_CHUNK_MATCH_ALL: | 87 | case SCTP_CHUNK_MATCH_ALL: |
88 | if (match_flags(flag_info, flag_count, | 88 | if (match_flags(flag_info, flag_count, |
89 | sch->type, sch->flags)) { | 89 | sch->type, sch->flags)) { |
90 | SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type); | 90 | SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type); |
91 | } | 91 | } |
92 | break; | 92 | break; |
93 | 93 | ||
94 | case SCTP_CHUNK_MATCH_ONLY: | 94 | case SCTP_CHUNK_MATCH_ONLY: |
95 | if (!match_flags(flag_info, flag_count, | 95 | if (!match_flags(flag_info, flag_count, |
96 | sch->type, sch->flags)) { | 96 | sch->type, sch->flags)) { |
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
@@ -136,24 +136,24 @@ match(const struct sk_buff *skb, | |||
136 | duprintf("Dropping non-first fragment.. FIXME\n"); | 136 | duprintf("Dropping non-first fragment.. FIXME\n"); |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | sh = skb_header_pointer(skb, protoff, sizeof(_sh), &_sh); | 140 | sh = skb_header_pointer(skb, protoff, sizeof(_sh), &_sh); |
141 | if (sh == NULL) { | 141 | if (sh == NULL) { |
142 | duprintf("Dropping evil TCP offset=0 tinygram.\n"); | 142 | duprintf("Dropping evil TCP offset=0 tinygram.\n"); |
143 | *hotdrop = 1; | 143 | *hotdrop = 1; |
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); | 146 | duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); |
147 | 147 | ||
148 | return SCCHECK(((ntohs(sh->source) >= info->spts[0]) | 148 | return SCCHECK(((ntohs(sh->source) >= info->spts[0]) |
149 | && (ntohs(sh->source) <= info->spts[1])), | 149 | && (ntohs(sh->source) <= info->spts[1])), |
150 | XT_SCTP_SRC_PORTS, info->flags, info->invflags) | 150 | XT_SCTP_SRC_PORTS, info->flags, info->invflags) |
151 | && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) | 151 | && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) |
152 | && (ntohs(sh->dest) <= info->dpts[1])), | 152 | && (ntohs(sh->dest) <= info->dpts[1])), |
153 | XT_SCTP_DEST_PORTS, info->flags, info->invflags) | 153 | XT_SCTP_DEST_PORTS, info->flags, info->invflags) |
154 | && SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t), | 154 | && SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t), |
155 | info->chunkmap, info->chunk_match_type, | 155 | info->chunkmap, info->chunk_match_type, |
156 | info->flag_info, info->flag_count, | 156 | info->flag_info, info->flag_count, |
157 | hotdrop), | 157 | hotdrop), |
158 | XT_SCTP_CHUNK_TYPES, info->flags, info->invflags); | 158 | XT_SCTP_CHUNK_TYPES, info->flags, info->invflags); |
159 | } | 159 | } |
@@ -170,9 +170,9 @@ checkentry(const char *tablename, | |||
170 | return !(info->flags & ~XT_SCTP_VALID_FLAGS) | 170 | return !(info->flags & ~XT_SCTP_VALID_FLAGS) |
171 | && !(info->invflags & ~XT_SCTP_VALID_FLAGS) | 171 | && !(info->invflags & ~XT_SCTP_VALID_FLAGS) |
172 | && !(info->invflags & ~info->flags) | 172 | && !(info->invflags & ~info->flags) |
173 | && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) || | 173 | && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) || |
174 | (info->chunk_match_type & | 174 | (info->chunk_match_type & |
175 | (SCTP_CHUNK_MATCH_ALL | 175 | (SCTP_CHUNK_MATCH_ALL |
176 | | SCTP_CHUNK_MATCH_ANY | 176 | | SCTP_CHUNK_MATCH_ANY |
177 | | SCTP_CHUNK_MATCH_ONLY))); | 177 | | SCTP_CHUNK_MATCH_ONLY))); |
178 | } | 178 | } |
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 4453252400aa..999a005dbd0c 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* String matching match for iptables | 1 | /* String matching match for iptables |
2 | * | 2 | * |
3 | * (C) 2005 Pablo Neira Ayuso <pablo@eurodev.net> | 3 | * (C) 2005 Pablo Neira Ayuso <pablo@eurodev.net> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
@@ -35,8 +35,8 @@ static int match(const struct sk_buff *skb, | |||
35 | 35 | ||
36 | memset(&state, 0, sizeof(struct ts_state)); | 36 | memset(&state, 0, sizeof(struct ts_state)); |
37 | 37 | ||
38 | return (skb_find_text((struct sk_buff *)skb, conf->from_offset, | 38 | return (skb_find_text((struct sk_buff *)skb, conf->from_offset, |
39 | conf->to_offset, conf->config, &state) | 39 | conf->to_offset, conf->config, &state) |
40 | != UINT_MAX) ^ conf->invert; | 40 | != UINT_MAX) ^ conf->invert; |
41 | } | 41 | } |
42 | 42 | ||
@@ -55,7 +55,7 @@ static int checkentry(const char *tablename, | |||
55 | if (conf->from_offset > conf->to_offset) | 55 | if (conf->from_offset > conf->to_offset) |
56 | return 0; | 56 | return 0; |
57 | if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') | 57 | if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') |
58 | return 0; | 58 | return 0; |
59 | if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) | 59 | if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) |
60 | return 0; | 60 | return 0; |
61 | ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, | 61 | ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, |
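skb_find_text() returns UINT_MAX when the pattern is not found, so the expression in match() above reduces to a found/not-found boolean, XORed with the user's invert flag. For example (offsets invented for illustration):

    /* found at offset 42:  (42 != UINT_MAX)       -> 1, match unless inverted
     * not found:           (UINT_MAX != UINT_MAX) -> 0, no match unless inverted */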
diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c index a3682fe2f192..80571d0749f7 100644 --- a/net/netfilter/xt_tcpmss.c +++ b/net/netfilter/xt_tcpmss.c | |||
@@ -64,9 +64,9 @@ match(const struct sk_buff *skb, | |||
64 | u_int16_t mssval; | 64 | u_int16_t mssval; |
65 | 65 | ||
66 | mssval = (op[i+2] << 8) | op[i+3]; | 66 | mssval = (op[i+2] << 8) | op[i+3]; |
67 | 67 | ||
68 | return (mssval >= info->mss_min && | 68 | return (mssval >= info->mss_min && |
69 | mssval <= info->mss_max) ^ info->invert; | 69 | mssval <= info->mss_max) ^ info->invert; |
70 | } | 70 | } |
71 | if (op[i] < 2) | 71 | if (op[i] < 2) |
72 | i++; | 72 | i++; |
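The mssval line above reassembles the 16-bit MSS value from the two option payload bytes, most significant byte first. For a standard MSS option (kind 2, length 4) advertising 1460, the option bytes are 0x02 0x04 0x05 0xb4, so (worked example, not from the patch):

    /* op[i] = 0x02 (kind), op[i+1] = 0x04 (length)
     * mssval = (op[i+2] << 8) | op[i+3]
     *        = (0x05 << 8) | 0xb4 = 0x05b4 = 1460 */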
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index a24f38510719..c394b413f651 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1997,9 +1997,14 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result, | |||
1997 | if (audit_enabled == 0) | 1997 | if (audit_enabled == 0) |
1998 | return; | 1998 | return; |
1999 | 1999 | ||
2000 | BUG_ON((type == AUDIT_MAC_IPSEC_ADDSA || | ||
2001 | type == AUDIT_MAC_IPSEC_DELSA) && !x); | ||
2002 | BUG_ON((type == AUDIT_MAC_IPSEC_ADDSPD || | ||
2003 | type == AUDIT_MAC_IPSEC_DELSPD) && !xp); | ||
2004 | |||
2000 | audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); | 2005 | audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); |
2001 | if (audit_buf == NULL) | 2006 | if (audit_buf == NULL) |
2002 | return; | 2007 | return; |
2003 | 2008 | ||
2004 | switch(type) { | 2009 | switch(type) { |
2005 | case AUDIT_MAC_IPSEC_ADDSA: | 2010 | case AUDIT_MAC_IPSEC_ADDSA: |
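The new BUG_ON()s spell out the calling convention of xfrm_audit_log(): SA events (ADDSA/DELSA) must pass a non-NULL xfrm_state, policy events (ADDSPD/DELSPD) a non-NULL xfrm_policy. Using the argument order visible in the xfrm_user.c hunk below (auid, sid, type, result, xp, x), that amounts to (sketch, not part of the patch):

    /* policy event: xp must be non-NULL, x may be NULL */
    xfrm_audit_log(auid, sid, AUDIT_MAC_IPSEC_DELSPD, result, xp, NULL);
    /* SA event: x must be non-NULL, xp may be NULL */
    xfrm_audit_log(auid, sid, AUDIT_MAC_IPSEC_DELSA, result, NULL, x);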
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index d55436d00e86..256745321611 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1273,10 +1273,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1273 | xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete); | 1273 | xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete); |
1274 | security_xfrm_policy_free(&tmp); | 1274 | security_xfrm_policy_free(&tmp); |
1275 | } | 1275 | } |
1276 | if (delete) | ||
1277 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
1278 | AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL); | ||
1279 | |||
1280 | if (xp == NULL) | 1276 | if (xp == NULL) |
1281 | return -ENOENT; | 1277 | return -ENOENT; |
1282 | 1278 | ||
@@ -1292,8 +1288,14 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1292 | MSG_DONTWAIT); | 1288 | MSG_DONTWAIT); |
1293 | } | 1289 | } |
1294 | } else { | 1290 | } else { |
1295 | if ((err = security_xfrm_policy_delete(xp)) != 0) | 1291 | err = security_xfrm_policy_delete(xp); |
1292 | |||
1293 | xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, | ||
1294 | AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); | ||
1295 | |||
1296 | if (err != 0) | ||
1296 | goto out; | 1297 | goto out; |
1298 | |||
1297 | c.data.byid = p->index; | 1299 | c.data.byid = p->index; |
1298 | c.event = nlh->nlmsg_type; | 1300 | c.event = nlh->nlmsg_type; |
1299 | c.seq = nlh->nlmsg_seq; | 1301 | c.seq = nlh->nlmsg_seq; |
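Net effect of the xfrm_get_policy() changes above: the DELSPD audit record that was written right after the policy lookup whenever a delete was requested (and which could pass a NULL xp, something the new BUG_ON in xfrm_audit_log would now catch) is dropped, and in the delete path the record is emitted after security_xfrm_policy_delete(), so its result field reports whether the deletion actually passed the security check rather than merely whether a policy was found. Condensed from the hunk above:

    err = security_xfrm_policy_delete(xp);
    xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                   AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);
    if (err != 0)
            goto out;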