author		David S. Miller <davem@davemloft.net>	2009-03-24 16:24:36 -0400
committer	David S. Miller <davem@davemloft.net>	2009-03-24 16:24:36 -0400
commit		b5bb14386eabcb4229ade2bc0a2b237ca166d37d (patch)
tree		1966e65479f0d12cec0a204443a95b8eb57946db /net
parent		bb4f92b3a33bfc31f55098da85be44702bea2d16 (diff)
parent		1d45209d89e647e9f27e4afa1f47338df73bc112 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c  1
-rw-r--r--  net/bridge/netfilter/ebtable_filter.c  1
-rw-r--r--  net/bridge/netfilter/ebtable_nat.c  1
-rw-r--r--  net/ipv4/netfilter/Kconfig  30
-rw-r--r--  net/ipv4/netfilter/Makefile  2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c  159
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c  2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c  2
-rw-r--r--  net/ipv4/netfilter/ip_tables.c  153
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c  2
-rw-r--r--  net/ipv4/netfilter/ipt_TTL.c  97
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c  2
-rw-r--r--  net/ipv4/netfilter/ipt_ttl.c  63
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c  1
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c  1
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c  1
-rw-r--r--  net/ipv4/netfilter/iptable_security.c  1
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c  4
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c  1
-rw-r--r--  net/ipv6/netfilter/Kconfig  38
-rw-r--r--  net/ipv6/netfilter/Makefile  2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c  1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  152
-rw-r--r--  net/ipv6/netfilter/ip6t_HL.c  95
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c  2
-rw-r--r--  net/ipv6/netfilter/ip6t_hl.c  68
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c  1
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c  1
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c  1
-rw-r--r--  net/ipv6/netfilter/ip6table_security.c  1
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c  1
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c  4
-rw-r--r--  net/netfilter/Kconfig  63
-rw-r--r--  net/netfilter/Makefile  4
-rw-r--r--  net/netfilter/core.c  2
-rw-r--r--  net/netfilter/nf_conntrack_core.c  14
-rw-r--r--  net/netfilter/nf_conntrack_expect.c  3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  161
-rw-r--r--  net/netfilter/nf_conntrack_proto.c  21
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c  145
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c  2
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  2
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c  2
-rw-r--r--  net/netfilter/nf_log.c  201
-rw-r--r--  net/netfilter/nfnetlink.c  6
-rw-r--r--  net/netfilter/nfnetlink_log.c  18
-rw-r--r--  net/netfilter/x_tables.c  26
-rw-r--r--  net/netfilter/xt_HL.c  171
-rw-r--r--  net/netfilter/xt_LED.c  161
-rw-r--r--  net/netfilter/xt_cluster.c  164
-rw-r--r--  net/netfilter/xt_hashlimit.c  7
-rw-r--r--  net/netfilter/xt_hl.c  108
-rw-r--r--  net/netfilter/xt_limit.c  40
-rw-r--r--  net/netfilter/xt_physdev.c  37
-rw-r--r--  net/netfilter/xt_quota.c  31
-rw-r--r--  net/netfilter/xt_statistic.c  28
-rw-r--r--  net/netlink/af_netlink.c  1
-rw-r--r--  net/sysctl_net.c  2
58 files changed, 1555 insertions, 756 deletions
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 8604dfc1fc3b..c751111440f8 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -46,7 +46,6 @@ static struct ebt_table broute_table =
 	.name		= "broute",
 	.table		= &initial_table,
 	.valid_hooks	= 1 << NF_BR_BROUTING,
-	.lock		= __RW_LOCK_UNLOCKED(broute_table.lock),
 	.check		= check,
 	.me		= THIS_MODULE,
 };
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 2b2e8040a9c6..a5eea72938a6 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -55,7 +55,6 @@ static struct ebt_table frame_filter =
 	.name		= "filter",
 	.table		= &initial_table,
 	.valid_hooks	= FILTER_VALID_HOOKS,
-	.lock		= __RW_LOCK_UNLOCKED(frame_filter.lock),
 	.check		= check,
 	.me		= THIS_MODULE,
 };
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 3fe1ae87e35f..6024c551f9a9 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -55,7 +55,6 @@ static struct ebt_table frame_nat =
 	.name		= "nat",
 	.table		= &initial_table,
 	.valid_hooks	= NAT_VALID_HOOKS,
-	.lock		= __RW_LOCK_UNLOCKED(frame_nat.lock),
 	.check		= check,
 	.me		= THIS_MODULE,
 };
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 3816e1dc9295..1833bdbf9805 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -31,7 +31,7 @@ config NF_CONNTRACK_PROC_COMPAT
 	default y
 	help
 	  This option enables /proc and sysctl compatibility with the old
-	  layer 3 dependant connection tracking. This is needed to keep
+	  layer 3 dependent connection tracking. This is needed to keep
 	  old programs that have not been adapted to the new names working.
 
 	  If unsure, say Y.
@@ -95,11 +95,11 @@ config IP_NF_MATCH_ECN
 config IP_NF_MATCH_TTL
 	tristate '"ttl" match support'
 	depends on NETFILTER_ADVANCED
-	help
-	  This adds CONFIG_IP_NF_MATCH_TTL option, which enabled the user
-	  to match packets by their TTL value.
-
-	  To compile it as a module, choose M here. If unsure, say N.
+	select NETFILTER_XT_MATCH_HL
+	---help---
+	This is a backwards-compat option for the user's convenience
+	(e.g. when running oldconfig). It selects
+	CONFIG_NETFILTER_XT_MATCH_HL.
 
 # `filter', generic and specific targets
 config IP_NF_FILTER
@@ -323,19 +323,13 @@ config IP_NF_TARGET_ECN
 	  To compile it as a module, choose M here. If unsure, say N.
 
 config IP_NF_TARGET_TTL
-	tristate 'TTL target support'
-	depends on IP_NF_MANGLE
+	tristate '"TTL" target support'
 	depends on NETFILTER_ADVANCED
-	help
-	  This option adds a `TTL' target, which enables the user to modify
-	  the TTL value of the IP header.
-
-	  While it is safe to decrement/lower the TTL, this target also enables
-	  functionality to increment and set the TTL value of the IP header to
-	  arbitrary values. This is EXTREMELY DANGEROUS since you can easily
-	  create immortal packets that loop forever on the network.
-
-	  To compile it as a module, choose M here. If unsure, say N.
+	select NETFILTER_XT_TARGET_HL
+	---help---
+	This is a backwards-compat option for the user's convenience
+	(e.g. when running oldconfig). It selects
+	CONFIG_NETFILTER_XT_TARGET_HL.
 
 # raw + specific targets
 config IP_NF_RAW
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 5f9b650d90fc..48111594ee9b 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
-obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
@@ -61,7 +60,6 @@ obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
 obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
 obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
-obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
 
 # generic ARP tables
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 7ea88b61cb0d..64a7c6ce0b98 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -73,6 +73,36 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
73 return (ret != 0); 73 return (ret != 0);
74} 74}
75 75
76/*
77 * Unfortunatly, _b and _mask are not aligned to an int (or long int)
78 * Some arches dont care, unrolling the loop is a win on them.
79 */
80static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
81{
82#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
83 const unsigned long *a = (const unsigned long *)_a;
84 const unsigned long *b = (const unsigned long *)_b;
85 const unsigned long *mask = (const unsigned long *)_mask;
86 unsigned long ret;
87
88 ret = (a[0] ^ b[0]) & mask[0];
89 if (IFNAMSIZ > sizeof(unsigned long))
90 ret |= (a[1] ^ b[1]) & mask[1];
91 if (IFNAMSIZ > 2 * sizeof(unsigned long))
92 ret |= (a[2] ^ b[2]) & mask[2];
93 if (IFNAMSIZ > 3 * sizeof(unsigned long))
94 ret |= (a[3] ^ b[3]) & mask[3];
95 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
96#else
97 unsigned long ret = 0;
98 int i;
99
100 for (i = 0; i < IFNAMSIZ; i++)
101 ret |= (_a[i] ^ _b[i]) & _mask[i];
102#endif
103 return ret;
104}
105
76/* Returns whether packet matches rule or not. */ 106/* Returns whether packet matches rule or not. */
77static inline int arp_packet_match(const struct arphdr *arphdr, 107static inline int arp_packet_match(const struct arphdr *arphdr,
78 struct net_device *dev, 108 struct net_device *dev,
@@ -83,7 +113,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
83 const char *arpptr = (char *)(arphdr + 1); 113 const char *arpptr = (char *)(arphdr + 1);
84 const char *src_devaddr, *tgt_devaddr; 114 const char *src_devaddr, *tgt_devaddr;
85 __be32 src_ipaddr, tgt_ipaddr; 115 __be32 src_ipaddr, tgt_ipaddr;
86 int i, ret; 116 long ret;
87 117
88#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) 118#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
89 119
@@ -156,10 +186,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
156 } 186 }
157 187
158 /* Look for ifname matches. */ 188 /* Look for ifname matches. */
159 for (i = 0, ret = 0; i < IFNAMSIZ; i++) { 189 ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
160 ret |= (indev[i] ^ arpinfo->iniface[i])
161 & arpinfo->iniface_mask[i];
162 }
163 190
164 if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { 191 if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
165 dprintf("VIA in mismatch (%s vs %s).%s\n", 192 dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -168,10 +195,7 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
168 return 0; 195 return 0;
169 } 196 }
170 197
171 for (i = 0, ret = 0; i < IFNAMSIZ; i++) { 198 ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
172 ret |= (outdev[i] ^ arpinfo->outiface[i])
173 & arpinfo->outiface_mask[i];
174 }
175 199
176 if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { 200 if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
177 dprintf("VIA out mismatch (%s vs %s).%s\n", 201 dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -221,7 +245,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
221 const struct net_device *out, 245 const struct net_device *out,
222 struct xt_table *table) 246 struct xt_table *table)
223{ 247{
224 static const char nulldevname[IFNAMSIZ]; 248 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
225 unsigned int verdict = NF_DROP; 249 unsigned int verdict = NF_DROP;
226 const struct arphdr *arp; 250 const struct arphdr *arp;
227 bool hotdrop = false; 251 bool hotdrop = false;
@@ -237,9 +261,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
237 indev = in ? in->name : nulldevname; 261 indev = in ? in->name : nulldevname;
238 outdev = out ? out->name : nulldevname; 262 outdev = out ? out->name : nulldevname;
239 263
240 read_lock_bh(&table->lock); 264 rcu_read_lock();
241 private = table->private; 265 private = rcu_dereference(table->private);
242 table_base = (void *)private->entries[smp_processor_id()]; 266 table_base = rcu_dereference(private->entries[smp_processor_id()]);
267
243 e = get_entry(table_base, private->hook_entry[hook]); 268 e = get_entry(table_base, private->hook_entry[hook]);
244 back = get_entry(table_base, private->underflow[hook]); 269 back = get_entry(table_base, private->underflow[hook]);
245 270
@@ -311,7 +336,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
311 e = (void *)e + e->next_offset; 336 e = (void *)e + e->next_offset;
312 } 337 }
313 } while (!hotdrop); 338 } while (!hotdrop);
314 read_unlock_bh(&table->lock); 339
340 rcu_read_unlock();
315 341
316 if (hotdrop) 342 if (hotdrop)
317 return NF_DROP; 343 return NF_DROP;
@@ -714,11 +740,65 @@ static void get_counters(const struct xt_table_info *t,
714 } 740 }
715} 741}
716 742
717static inline struct xt_counters *alloc_counters(struct xt_table *table) 743
744/* We're lazy, and add to the first CPU; overflow works its fey magic
745 * and everything is OK. */
746static int
747add_counter_to_entry(struct arpt_entry *e,
748 const struct xt_counters addme[],
749 unsigned int *i)
750{
751 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
752
753 (*i)++;
754 return 0;
755}
756
757/* Take values from counters and add them back onto the current cpu */
758static void put_counters(struct xt_table_info *t,
759 const struct xt_counters counters[])
760{
761 unsigned int i, cpu;
762
763 local_bh_disable();
764 cpu = smp_processor_id();
765 i = 0;
766 ARPT_ENTRY_ITERATE(t->entries[cpu],
767 t->size,
768 add_counter_to_entry,
769 counters,
770 &i);
771 local_bh_enable();
772}
773
774static inline int
775zero_entry_counter(struct arpt_entry *e, void *arg)
776{
777 e->counters.bcnt = 0;
778 e->counters.pcnt = 0;
779 return 0;
780}
781
782static void
783clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
784{
785 unsigned int cpu;
786 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
787
788 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
789 for_each_possible_cpu(cpu) {
790 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
791 ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
792 zero_entry_counter, NULL);
793 }
794}
795
796static struct xt_counters *alloc_counters(struct xt_table *table)
718{ 797{
719 unsigned int countersize; 798 unsigned int countersize;
720 struct xt_counters *counters; 799 struct xt_counters *counters;
721 const struct xt_table_info *private = table->private; 800 struct xt_table_info *private = table->private;
801 struct xt_table_info *info;
722 802
723 /* We need atomic snapshot of counters: rest doesn't change 803 /* We need atomic snapshot of counters: rest doesn't change
724 * (other than comefrom, which userspace doesn't care 804 * (other than comefrom, which userspace doesn't care
@@ -728,14 +808,30 @@ static inline struct xt_counters *alloc_counters(struct xt_table *table)
728 counters = vmalloc_node(countersize, numa_node_id()); 808 counters = vmalloc_node(countersize, numa_node_id());
729 809
730 if (counters == NULL) 810 if (counters == NULL)
731 return ERR_PTR(-ENOMEM); 811 goto nomem;
812
813 info = xt_alloc_table_info(private->size);
814 if (!info)
815 goto free_counters;
816
817 clone_counters(info, private);
818
819 mutex_lock(&table->lock);
820 xt_table_entry_swap_rcu(private, info);
821 synchronize_net(); /* Wait until smoke has cleared */
822
823 get_counters(info, counters);
824 put_counters(private, counters);
825 mutex_unlock(&table->lock);
732 826
733 /* First, sum counters... */ 827 xt_free_table_info(info);
734 write_lock_bh(&table->lock);
735 get_counters(private, counters);
736 write_unlock_bh(&table->lock);
737 828
738 return counters; 829 return counters;
830
831 free_counters:
832 vfree(counters);
833 nomem:
834 return ERR_PTR(-ENOMEM);
739} 835}
740 836
741static int copy_entries_to_user(unsigned int total_size, 837static int copy_entries_to_user(unsigned int total_size,
@@ -1075,20 +1171,6 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1075 return ret; 1171 return ret;
1076} 1172}
1077 1173
1078/* We're lazy, and add to the first CPU; overflow works its fey magic
1079 * and everything is OK.
1080 */
1081static inline int add_counter_to_entry(struct arpt_entry *e,
1082 const struct xt_counters addme[],
1083 unsigned int *i)
1084{
1085
1086 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1087
1088 (*i)++;
1089 return 0;
1090}
1091
1092static int do_add_counters(struct net *net, void __user *user, unsigned int len, 1174static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1093 int compat) 1175 int compat)
1094{ 1176{
@@ -1148,13 +1230,14 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1148 goto free; 1230 goto free;
1149 } 1231 }
1150 1232
1151 write_lock_bh(&t->lock); 1233 mutex_lock(&t->lock);
1152 private = t->private; 1234 private = t->private;
1153 if (private->number != num_counters) { 1235 if (private->number != num_counters) {
1154 ret = -EINVAL; 1236 ret = -EINVAL;
1155 goto unlock_up_free; 1237 goto unlock_up_free;
1156 } 1238 }
1157 1239
1240 preempt_disable();
1158 i = 0; 1241 i = 0;
1159 /* Choose the copy that is on our node */ 1242 /* Choose the copy that is on our node */
1160 loc_cpu_entry = private->entries[smp_processor_id()]; 1243 loc_cpu_entry = private->entries[smp_processor_id()];
@@ -1163,8 +1246,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1163 add_counter_to_entry, 1246 add_counter_to_entry,
1164 paddc, 1247 paddc,
1165 &i); 1248 &i);
1249 preempt_enable();
1166 unlock_up_free: 1250 unlock_up_free:
1167 write_unlock_bh(&t->lock); 1251 mutex_unlock(&t->lock);
1252
1168 xt_table_unlock(t); 1253 xt_table_unlock(t);
1169 module_put(t->me); 1254 module_put(t->me);
1170 free: 1255 free:
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index e091187e864f..6ecfdae7c589 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -48,8 +48,6 @@ static struct
 static struct xt_table packet_filter = {
 	.name = "filter",
 	.valid_hooks = FILTER_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
-	.private = NULL,
 	.me = THIS_MODULE,
 	.af = NFPROTO_ARP,
 };
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 432ce9d1c11c..5f22c91c6e15 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -24,6 +24,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/security.h>
+#include <linux/net.h>
 #include <linux/mutex.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -640,6 +641,7 @@ static void __exit ip_queue_fini(void)
 MODULE_DESCRIPTION("IPv4 packet queue handler");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);
 
 module_init(ip_queue_init);
 module_exit(ip_queue_fini);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ef8b6ca068b2..e5294aec967d 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -74,6 +74,25 @@ do { \
74 74
75 Hence the start of any table is given by get_table() below. */ 75 Hence the start of any table is given by get_table() below. */
76 76
77static unsigned long ifname_compare(const char *_a, const char *_b,
78 const unsigned char *_mask)
79{
80 const unsigned long *a = (const unsigned long *)_a;
81 const unsigned long *b = (const unsigned long *)_b;
82 const unsigned long *mask = (const unsigned long *)_mask;
83 unsigned long ret;
84
85 ret = (a[0] ^ b[0]) & mask[0];
86 if (IFNAMSIZ > sizeof(unsigned long))
87 ret |= (a[1] ^ b[1]) & mask[1];
88 if (IFNAMSIZ > 2 * sizeof(unsigned long))
89 ret |= (a[2] ^ b[2]) & mask[2];
90 if (IFNAMSIZ > 3 * sizeof(unsigned long))
91 ret |= (a[3] ^ b[3]) & mask[3];
92 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
93 return ret;
94}
95
77/* Returns whether matches rule or not. */ 96/* Returns whether matches rule or not. */
78/* Performance critical - called for every packet */ 97/* Performance critical - called for every packet */
79static inline bool 98static inline bool
@@ -83,7 +102,6 @@ ip_packet_match(const struct iphdr *ip,
83 const struct ipt_ip *ipinfo, 102 const struct ipt_ip *ipinfo,
84 int isfrag) 103 int isfrag)
85{ 104{
86 size_t i;
87 unsigned long ret; 105 unsigned long ret;
88 106
89#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) 107#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
@@ -103,12 +121,7 @@ ip_packet_match(const struct iphdr *ip,
103 return false; 121 return false;
104 } 122 }
105 123
106 /* Look for ifname matches; this should unroll nicely. */ 124 ret = ifname_compare(indev, ipinfo->iniface, ipinfo->iniface_mask);
107 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
108 ret |= (((const unsigned long *)indev)[i]
109 ^ ((const unsigned long *)ipinfo->iniface)[i])
110 & ((const unsigned long *)ipinfo->iniface_mask)[i];
111 }
112 125
113 if (FWINV(ret != 0, IPT_INV_VIA_IN)) { 126 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
114 dprintf("VIA in mismatch (%s vs %s).%s\n", 127 dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -117,11 +130,7 @@ ip_packet_match(const struct iphdr *ip,
117 return false; 130 return false;
118 } 131 }
119 132
120 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 133 ret = ifname_compare(outdev, ipinfo->outiface, ipinfo->outiface_mask);
121 ret |= (((const unsigned long *)outdev)[i]
122 ^ ((const unsigned long *)ipinfo->outiface)[i])
123 & ((const unsigned long *)ipinfo->outiface_mask)[i];
124 }
125 134
126 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { 135 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
127 dprintf("VIA out mismatch (%s vs %s).%s\n", 136 dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -347,10 +356,12 @@ ipt_do_table(struct sk_buff *skb,
347 mtpar.family = tgpar.family = NFPROTO_IPV4; 356 mtpar.family = tgpar.family = NFPROTO_IPV4;
348 tgpar.hooknum = hook; 357 tgpar.hooknum = hook;
349 358
350 read_lock_bh(&table->lock);
351 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 359 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
352 private = table->private; 360
353 table_base = (void *)private->entries[smp_processor_id()]; 361 rcu_read_lock();
362 private = rcu_dereference(table->private);
363 table_base = rcu_dereference(private->entries[smp_processor_id()]);
364
354 e = get_entry(table_base, private->hook_entry[hook]); 365 e = get_entry(table_base, private->hook_entry[hook]);
355 366
356 /* For return from builtin chain */ 367 /* For return from builtin chain */
@@ -445,7 +456,7 @@ ipt_do_table(struct sk_buff *skb,
445 } 456 }
446 } while (!hotdrop); 457 } while (!hotdrop);
447 458
448 read_unlock_bh(&table->lock); 459 rcu_read_unlock();
449 460
450#ifdef DEBUG_ALLOW_ALL 461#ifdef DEBUG_ALLOW_ALL
451 return NF_ACCEPT; 462 return NF_ACCEPT;
@@ -924,13 +935,68 @@ get_counters(const struct xt_table_info *t,
924 counters, 935 counters,
925 &i); 936 &i);
926 } 937 }
938
939}
940
941/* We're lazy, and add to the first CPU; overflow works its fey magic
942 * and everything is OK. */
943static int
944add_counter_to_entry(struct ipt_entry *e,
945 const struct xt_counters addme[],
946 unsigned int *i)
947{
948 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
949
950 (*i)++;
951 return 0;
952}
953
954/* Take values from counters and add them back onto the current cpu */
955static void put_counters(struct xt_table_info *t,
956 const struct xt_counters counters[])
957{
958 unsigned int i, cpu;
959
960 local_bh_disable();
961 cpu = smp_processor_id();
962 i = 0;
963 IPT_ENTRY_ITERATE(t->entries[cpu],
964 t->size,
965 add_counter_to_entry,
966 counters,
967 &i);
968 local_bh_enable();
969}
970
971
972static inline int
973zero_entry_counter(struct ipt_entry *e, void *arg)
974{
975 e->counters.bcnt = 0;
976 e->counters.pcnt = 0;
977 return 0;
978}
979
980static void
981clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
982{
983 unsigned int cpu;
984 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
985
986 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
987 for_each_possible_cpu(cpu) {
988 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
989 IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
990 zero_entry_counter, NULL);
991 }
927} 992}
928 993
929static struct xt_counters * alloc_counters(struct xt_table *table) 994static struct xt_counters * alloc_counters(struct xt_table *table)
930{ 995{
931 unsigned int countersize; 996 unsigned int countersize;
932 struct xt_counters *counters; 997 struct xt_counters *counters;
933 const struct xt_table_info *private = table->private; 998 struct xt_table_info *private = table->private;
999 struct xt_table_info *info;
934 1000
935 /* We need atomic snapshot of counters: rest doesn't change 1001 /* We need atomic snapshot of counters: rest doesn't change
936 (other than comefrom, which userspace doesn't care 1002 (other than comefrom, which userspace doesn't care
@@ -939,14 +1005,30 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
939 counters = vmalloc_node(countersize, numa_node_id()); 1005 counters = vmalloc_node(countersize, numa_node_id());
940 1006
941 if (counters == NULL) 1007 if (counters == NULL)
942 return ERR_PTR(-ENOMEM); 1008 goto nomem;
1009
1010 info = xt_alloc_table_info(private->size);
1011 if (!info)
1012 goto free_counters;
943 1013
944 /* First, sum counters... */ 1014 clone_counters(info, private);
945 write_lock_bh(&table->lock); 1015
946 get_counters(private, counters); 1016 mutex_lock(&table->lock);
947 write_unlock_bh(&table->lock); 1017 xt_table_entry_swap_rcu(private, info);
1018 synchronize_net(); /* Wait until smoke has cleared */
1019
1020 get_counters(info, counters);
1021 put_counters(private, counters);
1022 mutex_unlock(&table->lock);
1023
1024 xt_free_table_info(info);
948 1025
949 return counters; 1026 return counters;
1027
1028 free_counters:
1029 vfree(counters);
1030 nomem:
1031 return ERR_PTR(-ENOMEM);
950} 1032}
951 1033
952static int 1034static int
@@ -1312,27 +1394,6 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1312 return ret; 1394 return ret;
1313} 1395}
1314 1396
1315/* We're lazy, and add to the first CPU; overflow works its fey magic
1316 * and everything is OK. */
1317static int
1318add_counter_to_entry(struct ipt_entry *e,
1319 const struct xt_counters addme[],
1320 unsigned int *i)
1321{
1322#if 0
1323 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1324 *i,
1325 (long unsigned int)e->counters.pcnt,
1326 (long unsigned int)e->counters.bcnt,
1327 (long unsigned int)addme[*i].pcnt,
1328 (long unsigned int)addme[*i].bcnt);
1329#endif
1330
1331 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1332
1333 (*i)++;
1334 return 0;
1335}
1336 1397
1337static int 1398static int
1338do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) 1399do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
@@ -1393,13 +1454,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1393 goto free; 1454 goto free;
1394 } 1455 }
1395 1456
1396 write_lock_bh(&t->lock); 1457 mutex_lock(&t->lock);
1397 private = t->private; 1458 private = t->private;
1398 if (private->number != num_counters) { 1459 if (private->number != num_counters) {
1399 ret = -EINVAL; 1460 ret = -EINVAL;
1400 goto unlock_up_free; 1461 goto unlock_up_free;
1401 } 1462 }
1402 1463
1464 preempt_disable();
1403 i = 0; 1465 i = 0;
1404 /* Choose the copy that is on our node */ 1466 /* Choose the copy that is on our node */
1405 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1467 loc_cpu_entry = private->entries[raw_smp_processor_id()];
@@ -1408,8 +1470,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1408 add_counter_to_entry, 1470 add_counter_to_entry,
1409 paddc, 1471 paddc,
1410 &i); 1472 &i);
1473 preempt_enable();
1411 unlock_up_free: 1474 unlock_up_free:
1412 write_unlock_bh(&t->lock); 1475 mutex_unlock(&t->lock);
1413 xt_table_unlock(t); 1476 xt_table_unlock(t);
1414 module_put(t->me); 1477 module_put(t->me);
1415 free: 1478 free:
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 27a78fbbd92b..acc44c69eb68 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -464,7 +464,7 @@ static struct xt_target log_tg_reg __read_mostly = {
 	.me		= THIS_MODULE,
 };
 
-static const struct nf_logger ipt_log_logger ={
+static struct nf_logger ipt_log_logger __read_mostly = {
 	.name		= "ipt_LOG",
 	.logfn		= &ipt_log_packet,
 	.me		= THIS_MODULE,
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
deleted file mode 100644
index 6d76aae90cc0..000000000000
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ /dev/null
@@ -1,97 +0,0 @@
1/* TTL modification target for IP tables
2 * (C) 2000,2005 by Harald Welte <laforge@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/module.h>
11#include <linux/skbuff.h>
12#include <linux/ip.h>
13#include <net/checksum.h>
14
15#include <linux/netfilter/x_tables.h>
16#include <linux/netfilter_ipv4/ipt_TTL.h>
17
18MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
19MODULE_DESCRIPTION("Xtables: IPv4 TTL field modification target");
20MODULE_LICENSE("GPL");
21
22static unsigned int
23ttl_tg(struct sk_buff *skb, const struct xt_target_param *par)
24{
25 struct iphdr *iph;
26 const struct ipt_TTL_info *info = par->targinfo;
27 int new_ttl;
28
29 if (!skb_make_writable(skb, skb->len))
30 return NF_DROP;
31
32 iph = ip_hdr(skb);
33
34 switch (info->mode) {
35 case IPT_TTL_SET:
36 new_ttl = info->ttl;
37 break;
38 case IPT_TTL_INC:
39 new_ttl = iph->ttl + info->ttl;
40 if (new_ttl > 255)
41 new_ttl = 255;
42 break;
43 case IPT_TTL_DEC:
44 new_ttl = iph->ttl - info->ttl;
45 if (new_ttl < 0)
46 new_ttl = 0;
47 break;
48 default:
49 new_ttl = iph->ttl;
50 break;
51 }
52
53 if (new_ttl != iph->ttl) {
54 csum_replace2(&iph->check, htons(iph->ttl << 8),
55 htons(new_ttl << 8));
56 iph->ttl = new_ttl;
57 }
58
59 return XT_CONTINUE;
60}
61
62static bool ttl_tg_check(const struct xt_tgchk_param *par)
63{
64 const struct ipt_TTL_info *info = par->targinfo;
65
66 if (info->mode > IPT_TTL_MAXMODE) {
67 printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
68 info->mode);
69 return false;
70 }
71 if (info->mode != IPT_TTL_SET && info->ttl == 0)
72 return false;
73 return true;
74}
75
76static struct xt_target ttl_tg_reg __read_mostly = {
77 .name = "TTL",
78 .family = NFPROTO_IPV4,
79 .target = ttl_tg,
80 .targetsize = sizeof(struct ipt_TTL_info),
81 .table = "mangle",
82 .checkentry = ttl_tg_check,
83 .me = THIS_MODULE,
84};
85
86static int __init ttl_tg_init(void)
87{
88 return xt_register_target(&ttl_tg_reg);
89}
90
91static void __exit ttl_tg_exit(void)
92{
93 xt_unregister_target(&ttl_tg_reg);
94}
95
96module_init(ttl_tg_init);
97module_exit(ttl_tg_exit);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 18a2826b57c6..d32cc4bb328a 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -379,7 +379,7 @@ static struct xt_target ulog_tg_reg __read_mostly = {
 	.me		= THIS_MODULE,
 };
 
-static struct nf_logger ipt_ulog_logger = {
+static struct nf_logger ipt_ulog_logger __read_mostly = {
 	.name		= "ipt_ULOG",
 	.logfn		= ipt_logfn,
 	.me		= THIS_MODULE,
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
deleted file mode 100644
index 297f1cbf4ff5..000000000000
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/* IP tables module for matching the value of the TTL
2 *
3 * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/ip.h>
11#include <linux/module.h>
12#include <linux/skbuff.h>
13
14#include <linux/netfilter_ipv4/ipt_ttl.h>
15#include <linux/netfilter/x_tables.h>
16
17MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
18MODULE_DESCRIPTION("Xtables: IPv4 TTL field match");
19MODULE_LICENSE("GPL");
20
21static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par)
22{
23 const struct ipt_ttl_info *info = par->matchinfo;
24 const u8 ttl = ip_hdr(skb)->ttl;
25
26 switch (info->mode) {
27 case IPT_TTL_EQ:
28 return ttl == info->ttl;
29 case IPT_TTL_NE:
30 return ttl != info->ttl;
31 case IPT_TTL_LT:
32 return ttl < info->ttl;
33 case IPT_TTL_GT:
34 return ttl > info->ttl;
35 default:
36 printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
37 info->mode);
38 return false;
39 }
40
41 return false;
42}
43
44static struct xt_match ttl_mt_reg __read_mostly = {
45 .name = "ttl",
46 .family = NFPROTO_IPV4,
47 .match = ttl_mt,
48 .matchsize = sizeof(struct ipt_ttl_info),
49 .me = THIS_MODULE,
50};
51
52static int __init ttl_mt_init(void)
53{
54 return xt_register_match(&ttl_mt_reg);
55}
56
57static void __exit ttl_mt_exit(void)
58{
59 xt_unregister_match(&ttl_mt_reg);
60}
61
62module_init(ttl_mt_init);
63module_exit(ttl_mt_exit);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 52cb6939d093..c30a969724f8 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -56,7 +56,6 @@ static struct
 static struct xt_table packet_filter = {
 	.name = "filter",
 	.valid_hooks = FILTER_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 3929d20b9e45..4087614d9519 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -67,7 +67,6 @@ static struct
 static struct xt_table packet_mangler = {
 	.name = "mangle",
 	.valid_hooks = MANGLE_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 7f65d18333e3..e5356da1fb54 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -39,7 +39,6 @@ static struct
 static struct xt_table packet_raw = {
 	.name = "raw",
 	.valid_hooks = RAW_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index a52a35f4a584..29ab630f240a 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -60,7 +60,6 @@ static struct
 static struct xt_table security_table = {
 	.name = "security",
 	.valid_hooks = SECURITY_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(security_table.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 4beb04fac588..8b681f24e271 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -120,8 +120,10 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 		typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
 		seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
-		if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
+		if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) {
+			NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
 			return NF_DROP;
+		}
 	}
 out:
 	/* We've seen it coming out the other side: confirm it */
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index a7eb04719044..6348a793936e 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,6 @@ static struct
 static struct xt_table nat_table = {
 	.name = "nat",
 	.valid_hooks = NAT_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(nat_table.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 53ea512c4608..625353a5fe18 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -95,13 +95,13 @@ config IP6_NF_MATCH_OPTS
 	  To compile it as a module, choose M here. If unsure, say N.
 
 config IP6_NF_MATCH_HL
-	tristate '"hl" match support'
+	tristate '"hl" hoplimit match support'
 	depends on NETFILTER_ADVANCED
-	help
-	  HL matching allows you to match packets based on the hop
-	  limit of the packet.
-
-	  To compile it as a module, choose M here. If unsure, say N.
+	select NETFILTER_XT_MATCH_HL
+	---help---
+	This is a backwards-compat option for the user's convenience
+	(e.g. when running oldconfig). It selects
+	COFNIG_NETFILTER_XT_MATCH_HL.
 
 config IP6_NF_MATCH_IPV6HEADER
 	tristate '"ipv6header" IPv6 Extension Headers Match'
@@ -130,6 +130,15 @@ config IP6_NF_MATCH_RT
 	  To compile it as a module, choose M here. If unsure, say N.
 
 # The targets
+config IP6_NF_TARGET_HL
+	tristate '"HL" hoplimit target support'
+	depends on NETFILTER_ADVANCED
+	select NETFILTER_XT_TARGET_HL
+	---help---
+	This is a backwards-compat option for the user's convenience
+	(e.g. when running oldconfig). It selects
+	COFNIG_NETFILTER_XT_TARGET_HL.
+
 config IP6_NF_TARGET_LOG
 	tristate "LOG target support"
 	default m if NETFILTER_ADVANCED=n
@@ -170,23 +179,6 @@ config IP6_NF_MANGLE
 
 	  To compile it as a module, choose M here. If unsure, say N.
 
-config IP6_NF_TARGET_HL
-	tristate 'HL (hoplimit) target support'
-	depends on IP6_NF_MANGLE
-	depends on NETFILTER_ADVANCED
-	help
-	  This option adds a `HL' target, which enables the user to decrement
-	  the hoplimit value of the IPv6 header or set it to a given (lower)
-	  value.
-
-	  While it is safe to decrement the hoplimit value, this option also
-	  enables functionality to increment and set the hoplimit value of the
-	  IPv6 header to arbitrary values. This is EXTREMELY DANGEROUS since
-	  you can easily create immortal packets that loop forever on the
-	  network.
-
-	  To compile it as a module, choose M here. If unsure, say N.
-
 config IP6_NF_RAW
 	tristate 'raw table support (required for TRACE)'
 	depends on NETFILTER_ADVANCED
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 3f17c948eefb..aafbba30c899 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -20,13 +20,11 @@ obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
 obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
-obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
 obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 
 # targets
-obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
 obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
 obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 5859c046cbc4..b693f841aeb4 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -643,6 +643,7 @@ static void __exit ip6_queue_fini(void)
 
 MODULE_DESCRIPTION("IPv6 packet queue handler");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW);
 
 module_init(ip6_queue_init);
 module_exit(ip6_queue_fini);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index a33485dc81cb..34af7bb8df5f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -89,6 +89,25 @@ ip6t_ext_hdr(u8 nexthdr)
89 (nexthdr == IPPROTO_DSTOPTS) ); 89 (nexthdr == IPPROTO_DSTOPTS) );
90} 90}
91 91
92static unsigned long ifname_compare(const char *_a, const char *_b,
93 const unsigned char *_mask)
94{
95 const unsigned long *a = (const unsigned long *)_a;
96 const unsigned long *b = (const unsigned long *)_b;
97 const unsigned long *mask = (const unsigned long *)_mask;
98 unsigned long ret;
99
100 ret = (a[0] ^ b[0]) & mask[0];
101 if (IFNAMSIZ > sizeof(unsigned long))
102 ret |= (a[1] ^ b[1]) & mask[1];
103 if (IFNAMSIZ > 2 * sizeof(unsigned long))
104 ret |= (a[2] ^ b[2]) & mask[2];
105 if (IFNAMSIZ > 3 * sizeof(unsigned long))
106 ret |= (a[3] ^ b[3]) & mask[3];
107 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
108 return ret;
109}
110
92/* Returns whether matches rule or not. */ 111/* Returns whether matches rule or not. */
93/* Performance critical - called for every packet */ 112/* Performance critical - called for every packet */
94static inline bool 113static inline bool
@@ -99,7 +118,6 @@ ip6_packet_match(const struct sk_buff *skb,
99 unsigned int *protoff, 118 unsigned int *protoff,
100 int *fragoff, bool *hotdrop) 119 int *fragoff, bool *hotdrop)
101{ 120{
102 size_t i;
103 unsigned long ret; 121 unsigned long ret;
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb); 122 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105 123
@@ -120,12 +138,7 @@ ip6_packet_match(const struct sk_buff *skb,
120 return false; 138 return false;
121 } 139 }
122 140
123 /* Look for ifname matches; this should unroll nicely. */ 141 ret = ifname_compare(indev, ip6info->iniface, ip6info->iniface_mask);
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
128 }
129 142
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { 143 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n", 144 dprintf("VIA in mismatch (%s vs %s).%s\n",
@@ -134,11 +147,7 @@ ip6_packet_match(const struct sk_buff *skb,
134 return false; 147 return false;
135 } 148 }
136 149
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) { 150 ret = ifname_compare(outdev, ip6info->outiface, ip6info->outiface_mask);
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
141 }
142 151
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { 152 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n", 153 dprintf("VIA out mismatch (%s vs %s).%s\n",
@@ -373,10 +382,12 @@ ip6t_do_table(struct sk_buff *skb,
373 mtpar.family = tgpar.family = NFPROTO_IPV6; 382 mtpar.family = tgpar.family = NFPROTO_IPV6;
374 tgpar.hooknum = hook; 383 tgpar.hooknum = hook;
375 384
376 read_lock_bh(&table->lock);
377 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 385 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
378 private = table->private; 386
379 table_base = (void *)private->entries[smp_processor_id()]; 387 rcu_read_lock();
388 private = rcu_dereference(table->private);
389 table_base = rcu_dereference(private->entries[smp_processor_id()]);
390
380 e = get_entry(table_base, private->hook_entry[hook]); 391 e = get_entry(table_base, private->hook_entry[hook]);
381 392
382 /* For return from builtin chain */ 393 /* For return from builtin chain */
@@ -474,7 +485,7 @@ ip6t_do_table(struct sk_buff *skb,
474#ifdef CONFIG_NETFILTER_DEBUG 485#ifdef CONFIG_NETFILTER_DEBUG
475 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; 486 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
476#endif 487#endif
477 read_unlock_bh(&table->lock); 488 rcu_read_unlock();
478 489
479#ifdef DEBUG_ALLOW_ALL 490#ifdef DEBUG_ALLOW_ALL
480 return NF_ACCEPT; 491 return NF_ACCEPT;
@@ -955,11 +966,64 @@ get_counters(const struct xt_table_info *t,
955 } 966 }
956} 967}
957 968
969/* We're lazy, and add to the first CPU; overflow works its fey magic
970 * and everything is OK. */
971static int
972add_counter_to_entry(struct ip6t_entry *e,
973 const struct xt_counters addme[],
974 unsigned int *i)
975{
976 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
977
978 (*i)++;
979 return 0;
980}
981
982/* Take values from counters and add them back onto the current cpu */
983static void put_counters(struct xt_table_info *t,
984 const struct xt_counters counters[])
985{
986 unsigned int i, cpu;
987
988 local_bh_disable();
989 cpu = smp_processor_id();
990 i = 0;
991 IP6T_ENTRY_ITERATE(t->entries[cpu],
992 t->size,
993 add_counter_to_entry,
994 counters,
995 &i);
996 local_bh_enable();
997}
998
999static inline int
1000zero_entry_counter(struct ip6t_entry *e, void *arg)
1001{
1002 e->counters.bcnt = 0;
1003 e->counters.pcnt = 0;
1004 return 0;
1005}
1006
1007static void
1008clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
1009{
1010 unsigned int cpu;
1011 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
1012
1013 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1014 for_each_possible_cpu(cpu) {
1015 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
1016 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1017 zero_entry_counter, NULL);
1018 }
1019}
1020
958static struct xt_counters *alloc_counters(struct xt_table *table) 1021static struct xt_counters *alloc_counters(struct xt_table *table)
959{ 1022{
960 unsigned int countersize; 1023 unsigned int countersize;
961 struct xt_counters *counters; 1024 struct xt_counters *counters;
962 const struct xt_table_info *private = table->private; 1025 struct xt_table_info *private = table->private;
1026 struct xt_table_info *info;
963 1027
964 /* We need atomic snapshot of counters: rest doesn't change 1028 /* We need atomic snapshot of counters: rest doesn't change
965 (other than comefrom, which userspace doesn't care 1029 (other than comefrom, which userspace doesn't care
@@ -968,14 +1032,28 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
968 counters = vmalloc_node(countersize, numa_node_id()); 1032 counters = vmalloc_node(countersize, numa_node_id());
969 1033
970 if (counters == NULL) 1034 if (counters == NULL)
971 return ERR_PTR(-ENOMEM); 1035 goto nomem;
1036
1037 info = xt_alloc_table_info(private->size);
1038 if (!info)
1039 goto free_counters;
1040
1041 clone_counters(info, private);
972 1042
973 /* First, sum counters... */ 1043 mutex_lock(&table->lock);
974 write_lock_bh(&table->lock); 1044 xt_table_entry_swap_rcu(private, info);
975 get_counters(private, counters); 1045 synchronize_net(); /* Wait until smoke has cleared */
976 write_unlock_bh(&table->lock);
977 1046
978 return counters; 1047 get_counters(info, counters);
1048 put_counters(private, counters);
1049 mutex_unlock(&table->lock);
1050
1051 xt_free_table_info(info);
1052
1053 free_counters:
1054 vfree(counters);
1055 nomem:
1056 return ERR_PTR(-ENOMEM);
979} 1057}
980 1058
981static int 1059static int
@@ -1342,28 +1420,6 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1342 return ret; 1420 return ret;
1343} 1421}
1344 1422
1345/* We're lazy, and add to the first CPU; overflow works its fey magic
1346 * and everything is OK. */
1347static inline int
1348add_counter_to_entry(struct ip6t_entry *e,
1349 const struct xt_counters addme[],
1350 unsigned int *i)
1351{
1352#if 0
1353 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1354 *i,
1355 (long unsigned int)e->counters.pcnt,
1356 (long unsigned int)e->counters.bcnt,
1357 (long unsigned int)addme[*i].pcnt,
1358 (long unsigned int)addme[*i].bcnt);
1359#endif
1360
1361 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1362
1363 (*i)++;
1364 return 0;
1365}
1366
1367static int 1423static int
1368do_add_counters(struct net *net, void __user *user, unsigned int len, 1424do_add_counters(struct net *net, void __user *user, unsigned int len,
1369 int compat) 1425 int compat)
@@ -1424,13 +1480,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1424 goto free; 1480 goto free;
1425 } 1481 }
1426 1482
1427 write_lock_bh(&t->lock); 1483 mutex_lock(&t->lock);
1428 private = t->private; 1484 private = t->private;
1429 if (private->number != num_counters) { 1485 if (private->number != num_counters) {
1430 ret = -EINVAL; 1486 ret = -EINVAL;
1431 goto unlock_up_free; 1487 goto unlock_up_free;
1432 } 1488 }
1433 1489
1490 preempt_disable();
1434 i = 0; 1491 i = 0;
1435 /* Choose the copy that is on our node */ 1492 /* Choose the copy that is on our node */
1436 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1493 loc_cpu_entry = private->entries[raw_smp_processor_id()];
@@ -1439,8 +1496,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1439 add_counter_to_entry, 1496 add_counter_to_entry,
1440 paddc, 1497 paddc,
1441 &i); 1498 &i);
1499 preempt_enable();
1442 unlock_up_free: 1500 unlock_up_free:
1443 write_unlock_bh(&t->lock); 1501 mutex_unlock(&t->lock);
1444 xt_table_unlock(t); 1502 xt_table_unlock(t);
1445 module_put(t->me); 1503 module_put(t->me);
1446 free: 1504 free:
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
deleted file mode 100644
index 27b5adf670a2..000000000000
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * Hop Limit modification target for ip6tables
3 * Maciej Soltysiak <solt@dns.toxicfilms.tv>
4 * Based on HW's TTL module
5 *
6 * This software is distributed under the terms of GNU GPL
7 */
8
9#include <linux/module.h>
10#include <linux/skbuff.h>
11#include <linux/ip.h>
12#include <linux/ipv6.h>
13
14#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter_ipv6/ip6t_HL.h>
16
17MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
18MODULE_DESCRIPTION("Xtables: IPv6 Hop Limit field modification target");
19MODULE_LICENSE("GPL");
20
21static unsigned int
22hl_tg6(struct sk_buff *skb, const struct xt_target_param *par)
23{
24 struct ipv6hdr *ip6h;
25 const struct ip6t_HL_info *info = par->targinfo;
26 int new_hl;
27
28 if (!skb_make_writable(skb, skb->len))
29 return NF_DROP;
30
31 ip6h = ipv6_hdr(skb);
32
33 switch (info->mode) {
34 case IP6T_HL_SET:
35 new_hl = info->hop_limit;
36 break;
37 case IP6T_HL_INC:
38 new_hl = ip6h->hop_limit + info->hop_limit;
39 if (new_hl > 255)
40 new_hl = 255;
41 break;
42 case IP6T_HL_DEC:
43 new_hl = ip6h->hop_limit - info->hop_limit;
44 if (new_hl < 0)
45 new_hl = 0;
46 break;
47 default:
48 new_hl = ip6h->hop_limit;
49 break;
50 }
51
52 ip6h->hop_limit = new_hl;
53
54 return XT_CONTINUE;
55}
56
57static bool hl_tg6_check(const struct xt_tgchk_param *par)
58{
59 const struct ip6t_HL_info *info = par->targinfo;
60
61 if (info->mode > IP6T_HL_MAXMODE) {
62 printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
63 info->mode);
64 return false;
65 }
66 if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
67 printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't "
68 "make sense with value 0\n");
69 return false;
70 }
71 return true;
72}
73
74static struct xt_target hl_tg6_reg __read_mostly = {
75 .name = "HL",
76 .family = NFPROTO_IPV6,
77 .target = hl_tg6,
78 .targetsize = sizeof(struct ip6t_HL_info),
79 .table = "mangle",
80 .checkentry = hl_tg6_check,
81 .me = THIS_MODULE
82};
83
84static int __init hl_tg6_init(void)
85{
86 return xt_register_target(&hl_tg6_reg);
87}
88
89static void __exit hl_tg6_exit(void)
90{
91 xt_unregister_target(&hl_tg6_reg);
92}
93
94module_init(hl_tg6_init);
95module_exit(hl_tg6_exit);
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 37adf5abc51e..7018cac4fddc 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -477,7 +477,7 @@ static struct xt_target log_tg6_reg __read_mostly = {
 	.me		= THIS_MODULE,
 };
 
-static const struct nf_logger ip6t_logger = {
+static struct nf_logger ip6t_logger __read_mostly = {
 	.name		= "ip6t_LOG",
 	.logfn		= &ip6t_log_packet,
 	.me		= THIS_MODULE,
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
deleted file mode 100644
index c964dca1132d..000000000000
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ /dev/null
@@ -1,68 +0,0 @@
1/* Hop Limit matching module */
2
3/* (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv>
4 * Based on HW's ttl module
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/ipv6.h>
12#include <linux/module.h>
13#include <linux/skbuff.h>
14
15#include <linux/netfilter_ipv6/ip6t_hl.h>
16#include <linux/netfilter/x_tables.h>
17
18MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
19MODULE_DESCRIPTION("Xtables: IPv6 Hop Limit field match");
20MODULE_LICENSE("GPL");
21
22static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
23{
24 const struct ip6t_hl_info *info = par->matchinfo;
25 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
26
27 switch (info->mode) {
28 case IP6T_HL_EQ:
29 return ip6h->hop_limit == info->hop_limit;
30 break;
31 case IP6T_HL_NE:
32 return ip6h->hop_limit != info->hop_limit;
33 break;
34 case IP6T_HL_LT:
35 return ip6h->hop_limit < info->hop_limit;
36 break;
37 case IP6T_HL_GT:
38 return ip6h->hop_limit > info->hop_limit;
39 break;
40 default:
41 printk(KERN_WARNING "ip6t_hl: unknown mode %d\n",
42 info->mode);
43 return false;
44 }
45
46 return false;
47}
48
49static struct xt_match hl_mt6_reg __read_mostly = {
50 .name = "hl",
51 .family = NFPROTO_IPV6,
52 .match = hl_mt6,
53 .matchsize = sizeof(struct ip6t_hl_info),
54 .me = THIS_MODULE,
55};
56
57static int __init hl_mt6_init(void)
58{
59 return xt_register_match(&hl_mt6_reg);
60}
61
62static void __exit hl_mt6_exit(void)
63{
64 xt_unregister_match(&hl_mt6_reg);
65}
66
67module_init(hl_mt6_init);
68module_exit(hl_mt6_exit);
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 40d2e36d8fac..ef5a0a32bf8e 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -54,7 +54,6 @@ static struct
54static struct xt_table packet_filter = { 54static struct xt_table packet_filter = {
55 .name = "filter", 55 .name = "filter",
56 .valid_hooks = FILTER_VALID_HOOKS, 56 .valid_hooks = FILTER_VALID_HOOKS,
57 .lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
58 .me = THIS_MODULE, 57 .me = THIS_MODULE,
59 .af = AF_INET6, 58 .af = AF_INET6,
60}; 59};
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index d0b31b259d4d..ab0d398a2ba7 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -60,7 +60,6 @@ static struct
60static struct xt_table packet_mangler = { 60static struct xt_table packet_mangler = {
61 .name = "mangle", 61 .name = "mangle",
62 .valid_hooks = MANGLE_VALID_HOOKS, 62 .valid_hooks = MANGLE_VALID_HOOKS,
63 .lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
64 .me = THIS_MODULE, 63 .me = THIS_MODULE,
65 .af = AF_INET6, 64 .af = AF_INET6,
66}; 65};
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 109fab6f831a..4b792b6ca321 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -38,7 +38,6 @@ static struct
38static struct xt_table packet_raw = { 38static struct xt_table packet_raw = {
39 .name = "raw", 39 .name = "raw",
40 .valid_hooks = RAW_VALID_HOOKS, 40 .valid_hooks = RAW_VALID_HOOKS,
41 .lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
42 .me = THIS_MODULE, 41 .me = THIS_MODULE,
43 .af = AF_INET6, 42 .af = AF_INET6,
44}; 43};
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 20bc52f13e43..0ea37ff15d56 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -59,7 +59,6 @@ static struct
59static struct xt_table security_table = { 59static struct xt_table security_table = {
60 .name = "security", 60 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS, 61 .valid_hooks = SECURITY_VALID_HOOKS,
62 .lock = __RW_LOCK_UNLOCKED(security_table.lock),
63 .me = THIS_MODULE, 62 .me = THIS_MODULE,
64 .af = AF_INET6, 63 .af = AF_INET6,
65}; 64};
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 727b9530448a..e6852f617217 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -26,6 +26,7 @@
26#include <net/netfilter/nf_conntrack_l4proto.h> 26#include <net/netfilter/nf_conntrack_l4proto.h>
27#include <net/netfilter/nf_conntrack_l3proto.h> 27#include <net/netfilter/nf_conntrack_l3proto.h>
28#include <net/netfilter/nf_conntrack_core.h> 28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
29 30
30static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, 31static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
31 struct nf_conntrack_tuple *tuple) 32 struct nf_conntrack_tuple *tuple)
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 72dbb6d1a6b3..41b8a956e1be 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -126,6 +126,10 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
126 pr_debug("icmpv6: can't create new conn with type %u\n", 126 pr_debug("icmpv6: can't create new conn with type %u\n",
127 type + 128); 127 type + 128);
128 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); 128 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
129 if (LOG_INVALID(nf_ct_net(ct), IPPROTO_ICMPV6))
130 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
131 "nf_ct_icmpv6: invalid new with type %d ",
132 type + 128);
129 return false; 133 return false;
130 } 134 }
131 atomic_set(&ct->proto.icmp.count, 0); 135 atomic_set(&ct->proto.icmp.count, 0);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c2bac9cd0caf..2562d05dbaf5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -357,6 +357,45 @@ config NETFILTER_XT_TARGET_DSCP
357 357
358 To compile it as a module, choose M here. If unsure, say N. 358 To compile it as a module, choose M here. If unsure, say N.
359 359
360config NETFILTER_XT_TARGET_HL
361 tristate '"HL" hoplimit target support'
362 depends on IP_NF_MANGLE || IP6_NF_MANGLE
363 depends on NETFILTER_ADVANCED
364 ---help---
365 This option adds the "HL" (for IPv6) and "TTL" (for IPv4)
366 targets, which enable the user to change the
367 hoplimit/time-to-live value of the IP header.
368
369 While it is safe to decrement the hoplimit/TTL value, the
370	  modules also allow you to increment and set the hoplimit value of
371 the header to arbitrary values. This is EXTREMELY DANGEROUS
372 since you can easily create immortal packets that loop
373 forever on the network.
374
375config NETFILTER_XT_TARGET_LED
376 tristate '"LED" target support'
377 depends on LEDS_CLASS
378 depends on NETFILTER_ADVANCED
379 help
380 This option adds a `LED' target, which allows you to blink LEDs in
381 response to particular packets passing through your machine.
382
383 This can be used to turn a spare LED into a network activity LED,
384 which only flashes in response to FTP transfers, for example. Or
385 you could have an LED which lights up for a minute or two every time
386 somebody connects to your machine via SSH.
387
388 You will need support for the "led" class to make this work.
389
390 To create an LED trigger for incoming SSH traffic:
391 iptables -A INPUT -p tcp --dport 22 -j LED --led-trigger-id ssh --led-delay 1000
392
393 Then attach the new trigger to an LED on your system:
394 echo netfilter-ssh > /sys/class/leds/<ledname>/trigger
395
396 For more information on the LEDs available on your system, see
397 Documentation/leds-class.txt
398
360config NETFILTER_XT_TARGET_MARK 399config NETFILTER_XT_TARGET_MARK
361 tristate '"MARK" target support' 400 tristate '"MARK" target support'
362 default m if NETFILTER_ADVANCED=n 401 default m if NETFILTER_ADVANCED=n
@@ -488,6 +527,22 @@ config NETFILTER_XT_TARGET_TCPOPTSTRIP
488 This option adds a "TCPOPTSTRIP" target, which allows you to strip 527 This option adds a "TCPOPTSTRIP" target, which allows you to strip
489 TCP options from TCP packets. 528 TCP options from TCP packets.
490 529
530config NETFILTER_XT_MATCH_CLUSTER
531 tristate '"cluster" match support'
532 depends on NF_CONNTRACK
533 depends on NETFILTER_ADVANCED
534 ---help---
535 This option allows you to build work-load-sharing clusters of
536 network servers/stateful firewalls without having a dedicated
537 load-balancing router/server/switch. Basically, this match returns
538 true when the packet must be handled by this cluster node. Thus,
539 all nodes see all packets and this match decides which node handles
540 what packets. The work-load sharing algorithm is based on source
541 address hashing.
542
543 If you say Y or M here, try `iptables -m cluster --help` for
544 more information.
545
491config NETFILTER_XT_MATCH_COMMENT 546config NETFILTER_XT_MATCH_COMMENT
492 tristate '"comment" match support' 547 tristate '"comment" match support'
493 depends on NETFILTER_ADVANCED 548 depends on NETFILTER_ADVANCED
@@ -605,6 +660,14 @@ config NETFILTER_XT_MATCH_HELPER
605 660
606 To compile it as a module, choose M here. If unsure, say Y. 661 To compile it as a module, choose M here. If unsure, say Y.
607 662
663config NETFILTER_XT_MATCH_HL
664 tristate '"hl" hoplimit/TTL match support'
665 depends on NETFILTER_ADVANCED
666 ---help---
667 HL matching allows you to match packets based on the hoplimit
668 in the IPv6 header, or the time-to-live field in the IPv4
669 header of the packet.
670
608config NETFILTER_XT_MATCH_IPRANGE 671config NETFILTER_XT_MATCH_IPRANGE
609 tristate '"iprange" address range match support' 672 tristate '"iprange" address range match support'
610 depends on NETFILTER_ADVANCED 673 depends on NETFILTER_ADVANCED
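
The new NETFILTER_XT_TARGET_HL and NETFILTER_XT_MATCH_HL options correspond to the xt_HL.c and xt_hl.c files in the diffstat, which fold the removed ipt_TTL/ipt_ttl and ip6t_HL/ip6t_hl modules into single xtables modules registering one entry per address family. A minimal sketch of that registration pattern, assuming per-family handlers like the hl_tg6() shown in the deleted ip6t_HL.c above (names here are illustrative, not the actual xt_HL.c contents):

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_TTL.h>
#include <linux/netfilter_ipv6/ip6t_HL.h>

static unsigned int ttl_tg4(struct sk_buff *skb, const struct xt_target_param *par)
{
	/* real logic: adjust ip_hdr(skb)->ttl, as the old ipt_TTL.c did */
	return XT_CONTINUE;
}

static unsigned int hl_tg6(struct sk_buff *skb, const struct xt_target_param *par)
{
	/* real logic: adjust ipv6_hdr(skb)->hop_limit, see the removed
	 * ip6t_HL.c above */
	return XT_CONTINUE;
}

static struct xt_target hl_tg_reg[] __read_mostly = {
	{
		.name       = "TTL",
		.family     = NFPROTO_IPV4,
		.target     = ttl_tg4,
		.targetsize = sizeof(struct ipt_TTL_info),
		.table      = "mangle",
		.me         = THIS_MODULE,
	},
	{
		.name       = "HL",
		.family     = NFPROTO_IPV6,
		.target     = hl_tg6,
		.targetsize = sizeof(struct ip6t_HL_info),
		.table      = "mangle",
		.me         = THIS_MODULE,
	},
};

static int __init hl_tg_init(void)
{
	/* one call registers both family-specific targets */
	return xt_register_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg));
}

static void __exit hl_tg_exit(void)
{
	xt_unregister_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg));
}

module_init(hl_tg_init);
module_exit(hl_tg_exit);
MODULE_LICENSE("GPL");
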
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index da3d909e053f..6282060fbda9 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -45,6 +45,8 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o 45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 47obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 51obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
50obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o 52obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
57obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o 59obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
58 60
59# matches 61# matches
62obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
60obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o 63obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
61obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o 64obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
62obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o 65obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
@@ -67,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o 70obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o 71obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o 72obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
70obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o 74obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
71obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o 75obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
72obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o 76obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a90ac83c5918..5bb34737501f 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -174,7 +174,6 @@ next_hook:
174 outdev, &elem, okfn, hook_thresh); 174 outdev, &elem, okfn, hook_thresh);
175 if (verdict == NF_ACCEPT || verdict == NF_STOP) { 175 if (verdict == NF_ACCEPT || verdict == NF_STOP) {
176 ret = 1; 176 ret = 1;
177 goto unlock;
178 } else if (verdict == NF_DROP) { 177 } else if (verdict == NF_DROP) {
179 kfree_skb(skb); 178 kfree_skb(skb);
180 ret = -EPERM; 179 ret = -EPERM;
@@ -183,7 +182,6 @@ next_hook:
183 verdict >> NF_VERDICT_BITS)) 182 verdict >> NF_VERDICT_BITS))
184 goto next_hook; 183 goto next_hook;
185 } 184 }
186unlock:
187 rcu_read_unlock(); 185 rcu_read_unlock();
188 return ret; 186 return ret;
189} 187}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f4935e344b61..dfb447b584da 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_lock);
54unsigned int nf_conntrack_htable_size __read_mostly; 54unsigned int nf_conntrack_htable_size __read_mostly;
55EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); 55EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
56 56
57int nf_conntrack_max __read_mostly; 57unsigned int nf_conntrack_max __read_mostly;
58EXPORT_SYMBOL_GPL(nf_conntrack_max); 58EXPORT_SYMBOL_GPL(nf_conntrack_max);
59 59
60struct nf_conn nf_conntrack_untracked __read_mostly; 60struct nf_conn nf_conntrack_untracked __read_mostly;
@@ -472,7 +472,8 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
472 struct nf_conn *ct; 472 struct nf_conn *ct;
473 473
474 if (unlikely(!nf_conntrack_hash_rnd_initted)) { 474 if (unlikely(!nf_conntrack_hash_rnd_initted)) {
475 get_random_bytes(&nf_conntrack_hash_rnd, 4); 475 get_random_bytes(&nf_conntrack_hash_rnd,
476 sizeof(nf_conntrack_hash_rnd));
476 nf_conntrack_hash_rnd_initted = 1; 477 nf_conntrack_hash_rnd_initted = 1;
477 } 478 }
478 479
@@ -516,16 +517,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
516static void nf_conntrack_free_rcu(struct rcu_head *head) 517static void nf_conntrack_free_rcu(struct rcu_head *head)
517{ 518{
518 struct nf_conn *ct = container_of(head, struct nf_conn, rcu); 519 struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
519 struct net *net = nf_ct_net(ct);
520 520
521 nf_ct_ext_free(ct); 521 nf_ct_ext_free(ct);
522 kmem_cache_free(nf_conntrack_cachep, ct); 522 kmem_cache_free(nf_conntrack_cachep, ct);
523 atomic_dec(&net->ct.count);
524} 523}
525 524
526void nf_conntrack_free(struct nf_conn *ct) 525void nf_conntrack_free(struct nf_conn *ct)
527{ 526{
527 struct net *net = nf_ct_net(ct);
528
528 nf_ct_ext_destroy(ct); 529 nf_ct_ext_destroy(ct);
530 atomic_dec(&net->ct.count);
529 call_rcu(&ct->rcu, nf_conntrack_free_rcu); 531 call_rcu(&ct->rcu, nf_conntrack_free_rcu);
530} 532}
531EXPORT_SYMBOL_GPL(nf_conntrack_free); 533EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -733,6 +735,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
733 nf_conntrack_put(skb->nfct); 735 nf_conntrack_put(skb->nfct);
734 skb->nfct = NULL; 736 skb->nfct = NULL;
735 NF_CT_STAT_INC_ATOMIC(net, invalid); 737 NF_CT_STAT_INC_ATOMIC(net, invalid);
738 if (ret == -NF_DROP)
739 NF_CT_STAT_INC_ATOMIC(net, drop);
736 return -ret; 740 return -ret;
737 } 741 }
738 742
@@ -1103,7 +1107,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1103 1107
1104 /* We have to rehash for the new table anyway, so we also can 1108 /* We have to rehash for the new table anyway, so we also can
1105 * use a new random seed */ 1109 * use a new random seed */
1106 get_random_bytes(&rnd, 4); 1110 get_random_bytes(&rnd, sizeof(rnd));
1107 1111
1108 /* Lookups in the old hash might happen in parallel, which means we 1112 /* Lookups in the old hash might happen in parallel, which means we
1109 * might get false negatives during connection lookup. New connections 1113 * might get false negatives during connection lookup. New connections
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 3a8a34a6d37c..357ba39d4c8d 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -72,7 +72,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
72 unsigned int hash; 72 unsigned int hash;
73 73
74 if (unlikely(!nf_ct_expect_hash_rnd_initted)) { 74 if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
75 get_random_bytes(&nf_ct_expect_hash_rnd, 4); 75 get_random_bytes(&nf_ct_expect_hash_rnd,
76 sizeof(nf_ct_expect_hash_rnd));
76 nf_ct_expect_hash_rnd_initted = 1; 77 nf_ct_expect_hash_rnd_initted = 1;
77 } 78 }
78 79
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ed6d873ad384..7a16bd462f82 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -518,6 +518,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
518nla_put_failure: 518nla_put_failure:
519 rcu_read_unlock(); 519 rcu_read_unlock();
520nlmsg_failure: 520nlmsg_failure:
521 nfnetlink_set_err(0, group, -ENOBUFS);
521 kfree_skb(skb); 522 kfree_skb(skb);
522 return NOTIFY_DONE; 523 return NOTIFY_DONE;
523} 524}
@@ -599,7 +600,8 @@ ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
599 600
600 nla_parse_nested(tb, CTA_IP_MAX, attr, NULL); 601 nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
601 602
602 l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); 603 rcu_read_lock();
604 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
603 605
604 if (likely(l3proto->nlattr_to_tuple)) { 606 if (likely(l3proto->nlattr_to_tuple)) {
605 ret = nla_validate_nested(attr, CTA_IP_MAX, 607 ret = nla_validate_nested(attr, CTA_IP_MAX,
@@ -608,7 +610,7 @@ ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
608 ret = l3proto->nlattr_to_tuple(tb, tuple); 610 ret = l3proto->nlattr_to_tuple(tb, tuple);
609 } 611 }
610 612
611 nf_ct_l3proto_put(l3proto); 613 rcu_read_unlock();
612 614
613 return ret; 615 return ret;
614} 616}
@@ -633,7 +635,8 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
633 return -EINVAL; 635 return -EINVAL;
634 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]); 636 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
635 637
636 l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); 638 rcu_read_lock();
639 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
637 640
638 if (likely(l4proto->nlattr_to_tuple)) { 641 if (likely(l4proto->nlattr_to_tuple)) {
639 ret = nla_validate_nested(attr, CTA_PROTO_MAX, 642 ret = nla_validate_nested(attr, CTA_PROTO_MAX,
@@ -642,7 +645,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
642 ret = l4proto->nlattr_to_tuple(tb, tuple); 645 ret = l4proto->nlattr_to_tuple(tb, tuple);
643 } 646 }
644 647
645 nf_ct_l4proto_put(l4proto); 648 rcu_read_unlock();
646 649
647 return ret; 650 return ret;
648} 651}
@@ -989,10 +992,11 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
989 992
990 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); 993 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
991 994
992 l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct)); 995 rcu_read_lock();
996 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
993 if (l4proto->from_nlattr) 997 if (l4proto->from_nlattr)
994 err = l4proto->from_nlattr(tb, ct); 998 err = l4proto->from_nlattr(tb, ct);
995 nf_ct_l4proto_put(l4proto); 999 rcu_read_unlock();
996 1000
997 return err; 1001 return err;
998} 1002}
@@ -1062,6 +1066,10 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
1062{ 1066{
1063 int err; 1067 int err;
1064 1068
1069 /* only allow NAT changes and master assignation for new conntracks */
1070 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1071 return -EOPNOTSUPP;
1072
1065 if (cda[CTA_HELP]) { 1073 if (cda[CTA_HELP]) {
1066 err = ctnetlink_change_helper(ct, cda); 1074 err = ctnetlink_change_helper(ct, cda);
1067 if (err < 0) 1075 if (err < 0)
@@ -1124,13 +1132,11 @@ ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report)
1124 report); 1132 report);
1125} 1133}
1126 1134
1127static int 1135static struct nf_conn *
1128ctnetlink_create_conntrack(struct nlattr *cda[], 1136ctnetlink_create_conntrack(struct nlattr *cda[],
1129 struct nf_conntrack_tuple *otuple, 1137 struct nf_conntrack_tuple *otuple,
1130 struct nf_conntrack_tuple *rtuple, 1138 struct nf_conntrack_tuple *rtuple,
1131 struct nf_conn *master_ct, 1139 u8 u3)
1132 u32 pid,
1133 int report)
1134{ 1140{
1135 struct nf_conn *ct; 1141 struct nf_conn *ct;
1136 int err = -EINVAL; 1142 int err = -EINVAL;
@@ -1138,10 +1144,10 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1138 1144
1139 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); 1145 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
1140 if (IS_ERR(ct)) 1146 if (IS_ERR(ct))
1141 return -ENOMEM; 1147 return ERR_PTR(-ENOMEM);
1142 1148
1143 if (!cda[CTA_TIMEOUT]) 1149 if (!cda[CTA_TIMEOUT])
1144 goto err; 1150 goto err1;
1145 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1151 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1146 1152
1147 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; 1153 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
@@ -1152,10 +1158,8 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1152 char *helpname; 1158 char *helpname;
1153 1159
1154 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 1160 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
1155 if (err < 0) { 1161 if (err < 0)
1156 rcu_read_unlock(); 1162 goto err2;
1157 goto err;
1158 }
1159 1163
1160 helper = __nf_conntrack_helper_find_byname(helpname); 1164 helper = __nf_conntrack_helper_find_byname(helpname);
1161 if (helper == NULL) { 1165 if (helper == NULL) {
@@ -1163,28 +1167,26 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1163#ifdef CONFIG_MODULES 1167#ifdef CONFIG_MODULES
1164 if (request_module("nfct-helper-%s", helpname) < 0) { 1168 if (request_module("nfct-helper-%s", helpname) < 0) {
1165 err = -EOPNOTSUPP; 1169 err = -EOPNOTSUPP;
1166 goto err; 1170 goto err1;
1167 } 1171 }
1168 1172
1169 rcu_read_lock(); 1173 rcu_read_lock();
1170 helper = __nf_conntrack_helper_find_byname(helpname); 1174 helper = __nf_conntrack_helper_find_byname(helpname);
1171 if (helper) { 1175 if (helper) {
1172 rcu_read_unlock();
1173 err = -EAGAIN; 1176 err = -EAGAIN;
1174 goto err; 1177 goto err2;
1175 } 1178 }
1176 rcu_read_unlock(); 1179 rcu_read_unlock();
1177#endif 1180#endif
1178 err = -EOPNOTSUPP; 1181 err = -EOPNOTSUPP;
1179 goto err; 1182 goto err1;
1180 } else { 1183 } else {
1181 struct nf_conn_help *help; 1184 struct nf_conn_help *help;
1182 1185
1183 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1186 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
1184 if (help == NULL) { 1187 if (help == NULL) {
1185 rcu_read_unlock();
1186 err = -ENOMEM; 1188 err = -ENOMEM;
1187 goto err; 1189 goto err2;
1188 } 1190 }
1189 1191
1190 /* not in hash table yet so not strictly necessary */ 1192 /* not in hash table yet so not strictly necessary */
@@ -1193,44 +1195,34 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1193 } else { 1195 } else {
1194 /* try an implicit helper assignation */ 1196 /* try an implicit helper assignation */
1195 err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 1197 err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
1196 if (err < 0) { 1198 if (err < 0)
1197 rcu_read_unlock(); 1199 goto err2;
1198 goto err;
1199 }
1200 } 1200 }
1201 1201
1202 if (cda[CTA_STATUS]) { 1202 if (cda[CTA_STATUS]) {
1203 err = ctnetlink_change_status(ct, cda); 1203 err = ctnetlink_change_status(ct, cda);
1204 if (err < 0) { 1204 if (err < 0)
1205 rcu_read_unlock(); 1205 goto err2;
1206 goto err;
1207 }
1208 } 1206 }
1209 1207
1210 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1208 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1211 err = ctnetlink_change_nat(ct, cda); 1209 err = ctnetlink_change_nat(ct, cda);
1212 if (err < 0) { 1210 if (err < 0)
1213 rcu_read_unlock(); 1211 goto err2;
1214 goto err;
1215 }
1216 } 1212 }
1217 1213
1218#ifdef CONFIG_NF_NAT_NEEDED 1214#ifdef CONFIG_NF_NAT_NEEDED
1219 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) { 1215 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1220 err = ctnetlink_change_nat_seq_adj(ct, cda); 1216 err = ctnetlink_change_nat_seq_adj(ct, cda);
1221 if (err < 0) { 1217 if (err < 0)
1222 rcu_read_unlock(); 1218 goto err2;
1223 goto err;
1224 }
1225 } 1219 }
1226#endif 1220#endif
1227 1221
1228 if (cda[CTA_PROTOINFO]) { 1222 if (cda[CTA_PROTOINFO]) {
1229 err = ctnetlink_change_protoinfo(ct, cda); 1223 err = ctnetlink_change_protoinfo(ct, cda);
1230 if (err < 0) { 1224 if (err < 0)
1231 rcu_read_unlock(); 1225 goto err2;
1232 goto err;
1233 }
1234 } 1226 }
1235 1227
1236 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 1228 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
@@ -1241,23 +1233,37 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1241#endif 1233#endif
1242 1234
1243 /* setup master conntrack: this is a confirmed expectation */ 1235 /* setup master conntrack: this is a confirmed expectation */
1244 if (master_ct) { 1236 if (cda[CTA_TUPLE_MASTER]) {
1237 struct nf_conntrack_tuple master;
1238 struct nf_conntrack_tuple_hash *master_h;
1239 struct nf_conn *master_ct;
1240
1241 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1242 if (err < 0)
1243 goto err2;
1244
1245 master_h = __nf_conntrack_find(&init_net, &master);
1246 if (master_h == NULL) {
1247 err = -ENOENT;
1248 goto err2;
1249 }
1250 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1251 nf_conntrack_get(&master_ct->ct_general);
1245 __set_bit(IPS_EXPECTED_BIT, &ct->status); 1252 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1246 ct->master = master_ct; 1253 ct->master = master_ct;
1247 } 1254 }
1248 1255
1249 nf_conntrack_get(&ct->ct_general);
1250 add_timer(&ct->timeout); 1256 add_timer(&ct->timeout);
1251 nf_conntrack_hash_insert(ct); 1257 nf_conntrack_hash_insert(ct);
1252 rcu_read_unlock(); 1258 rcu_read_unlock();
1253 ctnetlink_event_report(ct, pid, report);
1254 nf_ct_put(ct);
1255 1259
1256 return 0; 1260 return ct;
1257 1261
1258err: 1262err2:
1263 rcu_read_unlock();
1264err1:
1259 nf_conntrack_free(ct); 1265 nf_conntrack_free(ct);
1260 return err; 1266 return ERR_PTR(err);
1261} 1267}
1262 1268
1263static int 1269static int
@@ -1289,38 +1295,25 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1289 h = __nf_conntrack_find(&init_net, &rtuple); 1295 h = __nf_conntrack_find(&init_net, &rtuple);
1290 1296
1291 if (h == NULL) { 1297 if (h == NULL) {
1292 struct nf_conntrack_tuple master; 1298 err = -ENOENT;
1293 struct nf_conntrack_tuple_hash *master_h = NULL; 1299 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1294 struct nf_conn *master_ct = NULL; 1300 struct nf_conn *ct;
1295
1296 if (cda[CTA_TUPLE_MASTER]) {
1297 err = ctnetlink_parse_tuple(cda,
1298 &master,
1299 CTA_TUPLE_MASTER,
1300 u3);
1301 if (err < 0)
1302 goto out_unlock;
1303 1301
1304 master_h = __nf_conntrack_find(&init_net, &master); 1302 ct = ctnetlink_create_conntrack(cda, &otuple,
1305 if (master_h == NULL) { 1303 &rtuple, u3);
1306 err = -ENOENT; 1304 if (IS_ERR(ct)) {
1305 err = PTR_ERR(ct);
1307 goto out_unlock; 1306 goto out_unlock;
1308 } 1307 }
1309 master_ct = nf_ct_tuplehash_to_ctrack(master_h); 1308 err = 0;
1310 nf_conntrack_get(&master_ct->ct_general); 1309 nf_conntrack_get(&ct->ct_general);
1311 } 1310 spin_unlock_bh(&nf_conntrack_lock);
1312 1311 ctnetlink_event_report(ct,
1313 err = -ENOENT; 1312 NETLINK_CB(skb).pid,
1314 if (nlh->nlmsg_flags & NLM_F_CREATE) 1313 nlmsg_report(nlh));
1315 err = ctnetlink_create_conntrack(cda, 1314 nf_ct_put(ct);
1316 &otuple, 1315 } else
1317 &rtuple, 1316 spin_unlock_bh(&nf_conntrack_lock);
1318 master_ct,
1319 NETLINK_CB(skb).pid,
1320 nlmsg_report(nlh));
1321 spin_unlock_bh(&nf_conntrack_lock);
1322 if (err < 0 && master_ct)
1323 nf_ct_put(master_ct);
1324 1317
1325 return err; 1318 return err;
1326 } 1319 }
@@ -1332,17 +1325,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1332 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { 1325 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1333 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 1326 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
1334 1327
1335 /* we only allow nat config for new conntracks */
1336 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1337 err = -EOPNOTSUPP;
1338 goto out_unlock;
1339 }
1340 /* can't link an existing conntrack to a master */
1341 if (cda[CTA_TUPLE_MASTER]) {
1342 err = -EOPNOTSUPP;
1343 goto out_unlock;
1344 }
1345
1346 err = ctnetlink_change_conntrack(ct, cda); 1328 err = ctnetlink_change_conntrack(ct, cda);
1347 if (err == 0) { 1329 if (err == 0) {
1348 nf_conntrack_get(&ct->ct_general); 1330 nf_conntrack_get(&ct->ct_general);
@@ -1533,6 +1515,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1533nla_put_failure: 1515nla_put_failure:
1534 rcu_read_unlock(); 1516 rcu_read_unlock();
1535nlmsg_failure: 1517nlmsg_failure:
1518 nfnetlink_set_err(0, 0, -ENOBUFS);
1536 kfree_skb(skb); 1519 kfree_skb(skb);
1537 return NOTIFY_DONE; 1520 return NOTIFY_DONE;
1538} 1521}
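
The refactored ctnetlink_create_conntrack() now returns the new conntrack itself, so ctnetlink_new_conntrack() can drop nf_conntrack_lock before reporting the event; errors travel back through the kernel's usual ERR_PTR encoding. A tiny reminder of that convention, using a hypothetical object rather than struct nf_conn:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

/* Hypothetical constructor: a valid pointer on success,
 * ERR_PTR(-errno) on failure. */
static struct demo_obj *demo_create(int id, gfp_t gfp)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), gfp);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->id = id;
	return obj;
}

static int demo_caller(void)
{
	struct demo_obj *obj = demo_create(1, GFP_KERNEL);

	/* callers test with IS_ERR()/PTR_ERR() instead of a NULL check,
	 * just as ctnetlink_new_conntrack() now does with the ct pointer */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	kfree(obj);
	return 0;
}
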
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 592d73344d46..9a62b4efa0e1 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -74,27 +74,6 @@ EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
74 74
75/* this is guaranteed to always return a valid protocol helper, since 75/* this is guaranteed to always return a valid protocol helper, since
76 * it falls back to generic_protocol */ 76 * it falls back to generic_protocol */
77struct nf_conntrack_l4proto *
78nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto)
79{
80 struct nf_conntrack_l4proto *p;
81
82 rcu_read_lock();
83 p = __nf_ct_l4proto_find(l3proto, l4proto);
84 if (!try_module_get(p->me))
85 p = &nf_conntrack_l4proto_generic;
86 rcu_read_unlock();
87
88 return p;
89}
90EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
91
92void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p)
93{
94 module_put(p->me);
95}
96EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
97
98struct nf_conntrack_l3proto * 77struct nf_conntrack_l3proto *
99nf_ct_l3proto_find_get(u_int16_t l3proto) 78nf_ct_l3proto_find_get(u_int16_t l3proto)
100{ 79{
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 8fcf1762fabf..d3d5a7fd73ce 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -16,6 +16,9 @@
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/dccp.h> 17#include <linux/dccp.h>
18 18
19#include <net/net_namespace.h>
20#include <net/netns/generic.h>
21
19#include <linux/netfilter/nfnetlink_conntrack.h> 22#include <linux/netfilter/nfnetlink_conntrack.h>
20#include <net/netfilter/nf_conntrack.h> 23#include <net/netfilter/nf_conntrack.h>
21#include <net/netfilter/nf_conntrack_l4proto.h> 24#include <net/netfilter/nf_conntrack_l4proto.h>
@@ -23,8 +26,6 @@
23 26
24static DEFINE_RWLOCK(dccp_lock); 27static DEFINE_RWLOCK(dccp_lock);
25 28
26static int nf_ct_dccp_loose __read_mostly = 1;
27
28/* Timeouts are based on values from RFC4340: 29/* Timeouts are based on values from RFC4340:
29 * 30 *
30 * - REQUEST: 31 * - REQUEST:
@@ -72,16 +73,6 @@ static int nf_ct_dccp_loose __read_mostly = 1;
72 73
73#define DCCP_MSL (2 * 60 * HZ) 74#define DCCP_MSL (2 * 60 * HZ)
74 75
75static unsigned int dccp_timeout[CT_DCCP_MAX + 1] __read_mostly = {
76 [CT_DCCP_REQUEST] = 2 * DCCP_MSL,
77 [CT_DCCP_RESPOND] = 4 * DCCP_MSL,
78 [CT_DCCP_PARTOPEN] = 4 * DCCP_MSL,
79 [CT_DCCP_OPEN] = 12 * 3600 * HZ,
80 [CT_DCCP_CLOSEREQ] = 64 * HZ,
81 [CT_DCCP_CLOSING] = 64 * HZ,
82 [CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL,
83};
84
85static const char * const dccp_state_names[] = { 76static const char * const dccp_state_names[] = {
86 [CT_DCCP_NONE] = "NONE", 77 [CT_DCCP_NONE] = "NONE",
87 [CT_DCCP_REQUEST] = "REQUEST", 78 [CT_DCCP_REQUEST] = "REQUEST",
@@ -393,6 +384,22 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
393 }, 384 },
394}; 385};
395 386
387/* this module per-net specifics */
388static int dccp_net_id;
389struct dccp_net {
390 int dccp_loose;
391 unsigned int dccp_timeout[CT_DCCP_MAX + 1];
392#ifdef CONFIG_SYSCTL
393 struct ctl_table_header *sysctl_header;
394 struct ctl_table *sysctl_table;
395#endif
396};
397
398static inline struct dccp_net *dccp_pernet(struct net *net)
399{
400 return net_generic(net, dccp_net_id);
401}
402
396static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 403static bool dccp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
397 struct nf_conntrack_tuple *tuple) 404 struct nf_conntrack_tuple *tuple)
398{ 405{
@@ -419,6 +426,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
419 unsigned int dataoff) 426 unsigned int dataoff)
420{ 427{
421 struct net *net = nf_ct_net(ct); 428 struct net *net = nf_ct_net(ct);
429 struct dccp_net *dn;
422 struct dccp_hdr _dh, *dh; 430 struct dccp_hdr _dh, *dh;
423 const char *msg; 431 const char *msg;
424 u_int8_t state; 432 u_int8_t state;
@@ -429,7 +437,8 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
429 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; 437 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
430 switch (state) { 438 switch (state) {
431 default: 439 default:
432 if (nf_ct_dccp_loose == 0) { 440 dn = dccp_pernet(net);
441 if (dn->dccp_loose == 0) {
433 msg = "nf_ct_dccp: not picking up existing connection "; 442 msg = "nf_ct_dccp: not picking up existing connection ";
434 goto out_invalid; 443 goto out_invalid;
435 } 444 }
@@ -465,6 +474,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
465 u_int8_t pf, unsigned int hooknum) 474 u_int8_t pf, unsigned int hooknum)
466{ 475{
467 struct net *net = nf_ct_net(ct); 476 struct net *net = nf_ct_net(ct);
477 struct dccp_net *dn;
468 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 478 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
469 struct dccp_hdr _dh, *dh; 479 struct dccp_hdr _dh, *dh;
470 u_int8_t type, old_state, new_state; 480 u_int8_t type, old_state, new_state;
@@ -542,7 +552,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
542 ct->proto.dccp.last_pkt = type; 552 ct->proto.dccp.last_pkt = type;
543 ct->proto.dccp.state = new_state; 553 ct->proto.dccp.state = new_state;
544 write_unlock_bh(&dccp_lock); 554 write_unlock_bh(&dccp_lock);
545 nf_ct_refresh_acct(ct, ctinfo, skb, dccp_timeout[new_state]); 555
556 dn = dccp_pernet(net);
557 nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
546 558
547 return NF_ACCEPT; 559 return NF_ACCEPT;
548} 560}
@@ -660,13 +672,11 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
660#endif 672#endif
661 673
662#ifdef CONFIG_SYSCTL 674#ifdef CONFIG_SYSCTL
663static unsigned int dccp_sysctl_table_users; 675/* template, data assigned later */
664static struct ctl_table_header *dccp_sysctl_header; 676static struct ctl_table dccp_sysctl_table[] = {
665static ctl_table dccp_sysctl_table[] = {
666 { 677 {
667 .ctl_name = CTL_UNNUMBERED, 678 .ctl_name = CTL_UNNUMBERED,
668 .procname = "nf_conntrack_dccp_timeout_request", 679 .procname = "nf_conntrack_dccp_timeout_request",
669 .data = &dccp_timeout[CT_DCCP_REQUEST],
670 .maxlen = sizeof(unsigned int), 680 .maxlen = sizeof(unsigned int),
671 .mode = 0644, 681 .mode = 0644,
672 .proc_handler = proc_dointvec_jiffies, 682 .proc_handler = proc_dointvec_jiffies,
@@ -674,7 +684,6 @@ static ctl_table dccp_sysctl_table[] = {
674 { 684 {
675 .ctl_name = CTL_UNNUMBERED, 685 .ctl_name = CTL_UNNUMBERED,
676 .procname = "nf_conntrack_dccp_timeout_respond", 686 .procname = "nf_conntrack_dccp_timeout_respond",
677 .data = &dccp_timeout[CT_DCCP_RESPOND],
678 .maxlen = sizeof(unsigned int), 687 .maxlen = sizeof(unsigned int),
679 .mode = 0644, 688 .mode = 0644,
680 .proc_handler = proc_dointvec_jiffies, 689 .proc_handler = proc_dointvec_jiffies,
@@ -682,7 +691,6 @@ static ctl_table dccp_sysctl_table[] = {
682 { 691 {
683 .ctl_name = CTL_UNNUMBERED, 692 .ctl_name = CTL_UNNUMBERED,
684 .procname = "nf_conntrack_dccp_timeout_partopen", 693 .procname = "nf_conntrack_dccp_timeout_partopen",
685 .data = &dccp_timeout[CT_DCCP_PARTOPEN],
686 .maxlen = sizeof(unsigned int), 694 .maxlen = sizeof(unsigned int),
687 .mode = 0644, 695 .mode = 0644,
688 .proc_handler = proc_dointvec_jiffies, 696 .proc_handler = proc_dointvec_jiffies,
@@ -690,7 +698,6 @@ static ctl_table dccp_sysctl_table[] = {
690 { 698 {
691 .ctl_name = CTL_UNNUMBERED, 699 .ctl_name = CTL_UNNUMBERED,
692 .procname = "nf_conntrack_dccp_timeout_open", 700 .procname = "nf_conntrack_dccp_timeout_open",
693 .data = &dccp_timeout[CT_DCCP_OPEN],
694 .maxlen = sizeof(unsigned int), 701 .maxlen = sizeof(unsigned int),
695 .mode = 0644, 702 .mode = 0644,
696 .proc_handler = proc_dointvec_jiffies, 703 .proc_handler = proc_dointvec_jiffies,
@@ -698,7 +705,6 @@ static ctl_table dccp_sysctl_table[] = {
698 { 705 {
699 .ctl_name = CTL_UNNUMBERED, 706 .ctl_name = CTL_UNNUMBERED,
700 .procname = "nf_conntrack_dccp_timeout_closereq", 707 .procname = "nf_conntrack_dccp_timeout_closereq",
701 .data = &dccp_timeout[CT_DCCP_CLOSEREQ],
702 .maxlen = sizeof(unsigned int), 708 .maxlen = sizeof(unsigned int),
703 .mode = 0644, 709 .mode = 0644,
704 .proc_handler = proc_dointvec_jiffies, 710 .proc_handler = proc_dointvec_jiffies,
@@ -706,7 +712,6 @@ static ctl_table dccp_sysctl_table[] = {
706 { 712 {
707 .ctl_name = CTL_UNNUMBERED, 713 .ctl_name = CTL_UNNUMBERED,
708 .procname = "nf_conntrack_dccp_timeout_closing", 714 .procname = "nf_conntrack_dccp_timeout_closing",
709 .data = &dccp_timeout[CT_DCCP_CLOSING],
710 .maxlen = sizeof(unsigned int), 715 .maxlen = sizeof(unsigned int),
711 .mode = 0644, 716 .mode = 0644,
712 .proc_handler = proc_dointvec_jiffies, 717 .proc_handler = proc_dointvec_jiffies,
@@ -714,7 +719,6 @@ static ctl_table dccp_sysctl_table[] = {
714 { 719 {
715 .ctl_name = CTL_UNNUMBERED, 720 .ctl_name = CTL_UNNUMBERED,
716 .procname = "nf_conntrack_dccp_timeout_timewait", 721 .procname = "nf_conntrack_dccp_timeout_timewait",
717 .data = &dccp_timeout[CT_DCCP_TIMEWAIT],
718 .maxlen = sizeof(unsigned int), 722 .maxlen = sizeof(unsigned int),
719 .mode = 0644, 723 .mode = 0644,
720 .proc_handler = proc_dointvec_jiffies, 724 .proc_handler = proc_dointvec_jiffies,
@@ -722,8 +726,7 @@ static ctl_table dccp_sysctl_table[] = {
722 { 726 {
723 .ctl_name = CTL_UNNUMBERED, 727 .ctl_name = CTL_UNNUMBERED,
724 .procname = "nf_conntrack_dccp_loose", 728 .procname = "nf_conntrack_dccp_loose",
725 .data = &nf_ct_dccp_loose, 729 .maxlen = sizeof(int),
726 .maxlen = sizeof(nf_ct_dccp_loose),
727 .mode = 0644, 730 .mode = 0644,
728 .proc_handler = proc_dointvec, 731 .proc_handler = proc_dointvec,
729 }, 732 },
@@ -751,11 +754,6 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
751 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 754 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
752 .nla_policy = nf_ct_port_nla_policy, 755 .nla_policy = nf_ct_port_nla_policy,
753#endif 756#endif
754#ifdef CONFIG_SYSCTL
755 .ctl_table_users = &dccp_sysctl_table_users,
756 .ctl_table_header = &dccp_sysctl_header,
757 .ctl_table = dccp_sysctl_table,
758#endif
759}; 757};
760 758
761static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { 759static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -776,34 +774,107 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
776 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 774 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
777 .nla_policy = nf_ct_port_nla_policy, 775 .nla_policy = nf_ct_port_nla_policy,
778#endif 776#endif
777};
778
779static __net_init int dccp_net_init(struct net *net)
780{
781 struct dccp_net *dn;
782 int err;
783
784 dn = kmalloc(sizeof(*dn), GFP_KERNEL);
785 if (!dn)
786 return -ENOMEM;
787
788 /* default values */
789 dn->dccp_loose = 1;
790 dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
791 dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
792 dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
793 dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
794 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
795 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
796 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
797
798 err = net_assign_generic(net, dccp_net_id, dn);
799 if (err)
800 goto out;
801
779#ifdef CONFIG_SYSCTL 802#ifdef CONFIG_SYSCTL
780 .ctl_table_users = &dccp_sysctl_table_users, 803 err = -ENOMEM;
781 .ctl_table_header = &dccp_sysctl_header, 804 dn->sysctl_table = kmemdup(dccp_sysctl_table,
782 .ctl_table = dccp_sysctl_table, 805 sizeof(dccp_sysctl_table), GFP_KERNEL);
806 if (!dn->sysctl_table)
807 goto out;
808
809 dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
810 dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
811 dn->sysctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
812 dn->sysctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
813 dn->sysctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
814 dn->sysctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
815 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
816 dn->sysctl_table[7].data = &dn->dccp_loose;
817
818 dn->sysctl_header = register_net_sysctl_table(net,
819 nf_net_netfilter_sysctl_path, dn->sysctl_table);
820 if (!dn->sysctl_header) {
821 kfree(dn->sysctl_table);
822 goto out;
823 }
783#endif 824#endif
825
826 return 0;
827
828out:
829 kfree(dn);
830 return err;
831}
832
833static __net_exit void dccp_net_exit(struct net *net)
834{
835 struct dccp_net *dn = dccp_pernet(net);
836#ifdef CONFIG_SYSCTL
837 unregister_net_sysctl_table(dn->sysctl_header);
838 kfree(dn->sysctl_table);
839#endif
840 kfree(dn);
841
842 net_assign_generic(net, dccp_net_id, NULL);
843}
844
845static struct pernet_operations dccp_net_ops = {
846 .init = dccp_net_init,
847 .exit = dccp_net_exit,
784}; 848};
785 849
786static int __init nf_conntrack_proto_dccp_init(void) 850static int __init nf_conntrack_proto_dccp_init(void)
787{ 851{
788 int err; 852 int err;
789 853
790 err = nf_conntrack_l4proto_register(&dccp_proto4); 854 err = register_pernet_gen_subsys(&dccp_net_id, &dccp_net_ops);
791 if (err < 0) 855 if (err < 0)
792 goto err1; 856 goto err1;
793 857
794 err = nf_conntrack_l4proto_register(&dccp_proto6); 858 err = nf_conntrack_l4proto_register(&dccp_proto4);
795 if (err < 0) 859 if (err < 0)
796 goto err2; 860 goto err2;
861
862 err = nf_conntrack_l4proto_register(&dccp_proto6);
863 if (err < 0)
864 goto err3;
797 return 0; 865 return 0;
798 866
799err2: 867err3:
800 nf_conntrack_l4proto_unregister(&dccp_proto4); 868 nf_conntrack_l4proto_unregister(&dccp_proto4);
869err2:
870 unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops);
801err1: 871err1:
802 return err; 872 return err;
803} 873}
804 874
805static void __exit nf_conntrack_proto_dccp_fini(void) 875static void __exit nf_conntrack_proto_dccp_fini(void)
806{ 876{
877 unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops);
807 nf_conntrack_l4proto_unregister(&dccp_proto6); 878 nf_conntrack_l4proto_unregister(&dccp_proto6);
808 nf_conntrack_l4proto_unregister(&dccp_proto4); 879 nf_conntrack_l4proto_unregister(&dccp_proto4);
809} 880}
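
The DCCP tracker's timeouts and the "loose" flag now live in per-namespace storage, following the generic net_generic()/register_pernet_gen_subsys() pattern: allocate the module's private data in the pernet init hook, look it up through a small accessor in the packet path, and release it in the exit hook. A stripped-down sketch of the same pattern for a hypothetical module (everything named foo_* is illustrative):

#include <linux/module.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int foo_net_id;		/* slot assigned by register_pernet_gen_subsys() */

struct foo_net {
	unsigned int timeout;	/* per-namespace tunable */
};

static inline struct foo_net *foo_pernet(struct net *net)
{
	return net_generic(net, foo_net_id);
}

static __net_init int foo_net_init(struct net *net)
{
	struct foo_net *fn;
	int err;

	fn = kmalloc(sizeof(*fn), GFP_KERNEL);
	if (!fn)
		return -ENOMEM;
	fn->timeout = 30 * HZ;	/* per-namespace default */

	err = net_assign_generic(net, foo_net_id, fn);
	if (err)
		kfree(fn);
	return err;
}

static __net_exit void foo_net_exit(struct net *net)
{
	kfree(foo_pernet(net));
	net_assign_generic(net, foo_net_id, NULL);
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	return register_pernet_gen_subsys(&foo_net_id, &foo_net_ops);
}

static void __exit foo_exit(void)
{
	unregister_pernet_gen_subsys(foo_net_id, &foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
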
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 4be80d7b8795..829374f426c4 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -92,7 +92,7 @@ static struct ctl_table generic_compat_sysctl_table[] = {
92struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = 92struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
93{ 93{
94 .l3proto = PF_UNSPEC, 94 .l3proto = PF_UNSPEC,
95 .l4proto = 0, 95 .l4proto = 255,
96 .name = "unknown", 96 .name = "unknown",
97 .pkt_to_tuple = generic_pkt_to_tuple, 97 .pkt_to_tuple = generic_pkt_to_tuple,
98 .invert_tuple = generic_invert_tuple, 98 .invert_tuple = generic_invert_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index f3fd154d1ddd..e46f3b79adb3 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -25,6 +25,8 @@
25#include <net/netfilter/nf_conntrack_l4proto.h> 25#include <net/netfilter/nf_conntrack_l4proto.h>
26#include <net/netfilter/nf_conntrack_ecache.h> 26#include <net/netfilter/nf_conntrack_ecache.h>
27#include <net/netfilter/nf_log.h> 27#include <net/netfilter/nf_log.h>
28#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
29#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
28 30
29/* Protects ct->proto.tcp */ 31/* Protects ct->proto.tcp */
30static DEFINE_RWLOCK(tcp_lock); 32static DEFINE_RWLOCK(tcp_lock);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 2b8b1f579f93..d4021179e24e 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -22,6 +22,8 @@
22#include <net/netfilter/nf_conntrack_l4proto.h> 22#include <net/netfilter/nf_conntrack_l4proto.h>
23#include <net/netfilter/nf_conntrack_ecache.h> 23#include <net/netfilter/nf_conntrack_ecache.h>
24#include <net/netfilter/nf_log.h> 24#include <net/netfilter/nf_log.h>
25#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
26#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
25 27
26static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; 28static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
27static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; 29static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index fa8ae5d2659c..8bb998fe098b 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -14,58 +14,63 @@
14 LOG target modules */ 14 LOG target modules */
15 15
16#define NF_LOG_PREFIXLEN 128 16#define NF_LOG_PREFIXLEN 128
17#define NFLOGGER_NAME_LEN 64
17 18
18static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly; 19static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
20static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
19static DEFINE_MUTEX(nf_log_mutex); 21static DEFINE_MUTEX(nf_log_mutex);
20 22
21/* return EBUSY if somebody else is registered, EEXIST if the same logger 23static struct nf_logger *__find_logger(int pf, const char *str_logger)
22 * is registred, 0 on success. */
23int nf_log_register(u_int8_t pf, const struct nf_logger *logger)
24{ 24{
25 int ret; 25 struct nf_logger *t;
26 26
27 if (pf >= ARRAY_SIZE(nf_loggers)) 27 list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) {
28 return -EINVAL; 28 if (!strnicmp(str_logger, t->name, strlen(t->name)))
29 29 return t;
30 /* Any setup of logging members must be done before 30 }
31 * substituting pointer. */
32 ret = mutex_lock_interruptible(&nf_log_mutex);
33 if (ret < 0)
34 return ret;
35
36 if (!nf_loggers[pf])
37 rcu_assign_pointer(nf_loggers[pf], logger);
38 else if (nf_loggers[pf] == logger)
39 ret = -EEXIST;
40 else
41 ret = -EBUSY;
42 31
43 mutex_unlock(&nf_log_mutex); 32 return NULL;
44 return ret;
45} 33}
46EXPORT_SYMBOL(nf_log_register);
47 34
48void nf_log_unregister_pf(u_int8_t pf) 35/* return EEXIST if the same logger is registred, 0 on success. */
36int nf_log_register(u_int8_t pf, struct nf_logger *logger)
49{ 37{
38 const struct nf_logger *llog;
39
50 if (pf >= ARRAY_SIZE(nf_loggers)) 40 if (pf >= ARRAY_SIZE(nf_loggers))
51 return; 41 return -EINVAL;
42
52 mutex_lock(&nf_log_mutex); 43 mutex_lock(&nf_log_mutex);
53 rcu_assign_pointer(nf_loggers[pf], NULL); 44
45 if (pf == NFPROTO_UNSPEC) {
46 int i;
47 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
48 list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
49 } else {
50 /* register at end of list to honor first register win */
51 list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
52 llog = rcu_dereference(nf_loggers[pf]);
53 if (llog == NULL)
54 rcu_assign_pointer(nf_loggers[pf], logger);
55 }
56
54 mutex_unlock(&nf_log_mutex); 57 mutex_unlock(&nf_log_mutex);
55 58
56 /* Give time to concurrent readers. */ 59 return 0;
57 synchronize_rcu();
58} 60}
59EXPORT_SYMBOL(nf_log_unregister_pf); 61EXPORT_SYMBOL(nf_log_register);
60 62
61void nf_log_unregister(const struct nf_logger *logger) 63void nf_log_unregister(struct nf_logger *logger)
62{ 64{
65 const struct nf_logger *c_logger;
63 int i; 66 int i;
64 67
65 mutex_lock(&nf_log_mutex); 68 mutex_lock(&nf_log_mutex);
66 for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { 69 for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) {
67 if (nf_loggers[i] == logger) 70 c_logger = rcu_dereference(nf_loggers[i]);
71 if (c_logger == logger)
68 rcu_assign_pointer(nf_loggers[i], NULL); 72 rcu_assign_pointer(nf_loggers[i], NULL);
73 list_del(&logger->list[i]);
69 } 74 }
70 mutex_unlock(&nf_log_mutex); 75 mutex_unlock(&nf_log_mutex);
71 76
@@ -73,6 +78,27 @@ void nf_log_unregister(const struct nf_logger *logger)
73} 78}
74EXPORT_SYMBOL(nf_log_unregister); 79EXPORT_SYMBOL(nf_log_unregister);
75 80
81int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
82{
83 mutex_lock(&nf_log_mutex);
84 if (__find_logger(pf, logger->name) == NULL) {
85 mutex_unlock(&nf_log_mutex);
86 return -ENOENT;
87 }
88 rcu_assign_pointer(nf_loggers[pf], logger);
89 mutex_unlock(&nf_log_mutex);
90 return 0;
91}
92EXPORT_SYMBOL(nf_log_bind_pf);
93
94void nf_log_unbind_pf(u_int8_t pf)
95{
96 mutex_lock(&nf_log_mutex);
97 rcu_assign_pointer(nf_loggers[pf], NULL);
98 mutex_unlock(&nf_log_mutex);
99}
100EXPORT_SYMBOL(nf_log_unbind_pf);
101
76void nf_log_packet(u_int8_t pf, 102void nf_log_packet(u_int8_t pf,
77 unsigned int hooknum, 103 unsigned int hooknum,
78 const struct sk_buff *skb, 104 const struct sk_buff *skb,
@@ -129,13 +155,37 @@ static int seq_show(struct seq_file *s, void *v)
129{ 155{
130 loff_t *pos = v; 156 loff_t *pos = v;
131 const struct nf_logger *logger; 157 const struct nf_logger *logger;
158 struct nf_logger *t;
159 int ret;
132 160
133 logger = rcu_dereference(nf_loggers[*pos]); 161 logger = rcu_dereference(nf_loggers[*pos]);
134 162
135 if (!logger) 163 if (!logger)
136 return seq_printf(s, "%2lld NONE\n", *pos); 164 ret = seq_printf(s, "%2lld NONE (", *pos);
165 else
166 ret = seq_printf(s, "%2lld %s (", *pos, logger->name);
167
168 if (ret < 0)
169 return ret;
170
171 mutex_lock(&nf_log_mutex);
172 list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
173 ret = seq_printf(s, "%s", t->name);
174 if (ret < 0) {
175 mutex_unlock(&nf_log_mutex);
176 return ret;
177 }
178 if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
179 ret = seq_printf(s, ",");
180 if (ret < 0) {
181 mutex_unlock(&nf_log_mutex);
182 return ret;
183 }
184 }
185 }
186 mutex_unlock(&nf_log_mutex);
137 187
138 return seq_printf(s, "%2lld %s\n", *pos, logger->name); 188 return seq_printf(s, ")\n");
139} 189}
140 190
141static const struct seq_operations nflog_seq_ops = { 191static const struct seq_operations nflog_seq_ops = {
@@ -158,15 +208,102 @@ static const struct file_operations nflog_file_ops = {
158 .release = seq_release, 208 .release = seq_release,
159}; 209};
160 210
211
161#endif /* PROC_FS */ 212#endif /* PROC_FS */
162 213
214#ifdef CONFIG_SYSCTL
215struct ctl_path nf_log_sysctl_path[] = {
216 { .procname = "net", .ctl_name = CTL_NET, },
217 { .procname = "netfilter", .ctl_name = NET_NETFILTER, },
218 { .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, },
219 { }
220};
221
222static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
223static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
224static struct ctl_table_header *nf_log_dir_header;
225
226static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
227 void *buffer, size_t *lenp, loff_t *ppos)
228{
229 const struct nf_logger *logger;
230 int r = 0;
231 int tindex = (unsigned long)table->extra1;
232
233 if (write) {
234 if (!strcmp(buffer, "NONE")) {
235 nf_log_unbind_pf(tindex);
236 return 0;
237 }
238 mutex_lock(&nf_log_mutex);
239 logger = __find_logger(tindex, buffer);
240 if (logger == NULL) {
241 mutex_unlock(&nf_log_mutex);
242 return -ENOENT;
243 }
244 rcu_assign_pointer(nf_loggers[tindex], logger);
245 mutex_unlock(&nf_log_mutex);
246 } else {
247 rcu_read_lock();
248 logger = rcu_dereference(nf_loggers[tindex]);
249 if (!logger)
250 table->data = "NONE";
251 else
252 table->data = logger->name;
253 r = proc_dostring(table, write, filp, buffer, lenp, ppos);
254 rcu_read_unlock();
255 }
256
257 return r;
258}
259
260static __init int netfilter_log_sysctl_init(void)
261{
262 int i;
263
264 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
265 snprintf(nf_log_sysctl_fnames[i-NFPROTO_UNSPEC], 3, "%d", i);
266 nf_log_sysctl_table[i].ctl_name = CTL_UNNUMBERED;
267 nf_log_sysctl_table[i].procname =
268 nf_log_sysctl_fnames[i-NFPROTO_UNSPEC];
269 nf_log_sysctl_table[i].data = NULL;
270 nf_log_sysctl_table[i].maxlen =
271 NFLOGGER_NAME_LEN * sizeof(char);
272 nf_log_sysctl_table[i].mode = 0644;
273 nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring;
274 nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
275 }
276
277 nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path,
278 nf_log_sysctl_table);
279 if (!nf_log_dir_header)
280 return -ENOMEM;
281
282 return 0;
283}
284#else
285static __init int netfilter_log_sysctl_init(void)
286{
287 return 0;
288}
289#endif /* CONFIG_SYSCTL */
163 290
164int __init netfilter_log_init(void) 291int __init netfilter_log_init(void)
165{ 292{
293 int i, r;
166#ifdef CONFIG_PROC_FS 294#ifdef CONFIG_PROC_FS
167 if (!proc_create("nf_log", S_IRUGO, 295 if (!proc_create("nf_log", S_IRUGO,
168 proc_net_netfilter, &nflog_file_ops)) 296 proc_net_netfilter, &nflog_file_ops))
169 return -1; 297 return -1;
170#endif 298#endif
299
300 /* Errors will trigger panic, unroll on error is unnecessary. */
301 r = netfilter_log_sysctl_init();
302 if (r < 0)
303 return r;
304
305 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
306 INIT_LIST_HEAD(&(nf_loggers_l[i]));
307
171 return 0; 308 return 0;
172} 309}
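
Under the reworked nf_log core, nf_log_register() only appends a backend to the per-family list of available loggers (becoming the active one when the slot is still empty), while nf_log_bind_pf()/nf_log_unbind_pf() switch which registered backend is active for a protocol family; the same switch is exposed to userspace through the new /proc/sys/net/netfilter/nf_log/<pf> files. A rough sketch of a logger backend against the new calls (the backend itself is made up):

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_log.h>

static void demo_log_packet(u_int8_t pf, unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct nf_loginfo *loginfo,
			    const char *prefix)
{
	/* emit the log record somewhere */
}

static struct nf_logger demo_logger __read_mostly = {
	.name	= "demo_log",
	.logfn	= demo_log_packet,
	.me	= THIS_MODULE,
};

static int __init demo_init(void)
{
	int ret;

	/* make the backend available for all families; this no longer
	 * fails with -EBUSY when another logger is already active */
	ret = nf_log_register(NFPROTO_UNSPEC, &demo_logger);
	if (ret < 0)
		return ret;

	/* optionally become the active IPv4 logger right away; userspace
	 * can do the same via /proc/sys/net/netfilter/nf_log/2 */
	ret = nf_log_bind_pf(NFPROTO_IPV4, &demo_logger);
	if (ret < 0)
		nf_log_unregister(&demo_logger);
	return ret;
}

static void __exit demo_exit(void)
{
	nf_log_unregister(&demo_logger);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
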
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 9c0ba17a1ddb..2785d66a7e38 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -113,6 +113,12 @@ int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
113} 113}
114EXPORT_SYMBOL_GPL(nfnetlink_send); 114EXPORT_SYMBOL_GPL(nfnetlink_send);
115 115
116void nfnetlink_set_err(u32 pid, u32 group, int error)
117{
118 netlink_set_err(nfnl, pid, group, error);
119}
120EXPORT_SYMBOL_GPL(nfnetlink_set_err);
121
116int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) 122int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags)
117{ 123{
118 return netlink_unicast(nfnl, skb, pid, flags); 124 return netlink_unicast(nfnl, skb, pid, flags);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index c712e9fc6bba..fd326ac27ec8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -693,7 +693,7 @@ nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
693 return -ENOTSUPP; 693 return -ENOTSUPP;
694} 694}
695 695
696static const struct nf_logger nfulnl_logger = { 696static struct nf_logger nfulnl_logger __read_mostly = {
697 .name = "nfnetlink_log", 697 .name = "nfnetlink_log",
698 .logfn = &nfulnl_log_packet, 698 .logfn = &nfulnl_log_packet,
699 .me = THIS_MODULE, 699 .me = THIS_MODULE,
@@ -725,9 +725,9 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
725 /* Commands without queue context */ 725 /* Commands without queue context */
726 switch (cmd->command) { 726 switch (cmd->command) {
727 case NFULNL_CFG_CMD_PF_BIND: 727 case NFULNL_CFG_CMD_PF_BIND:
728 return nf_log_register(pf, &nfulnl_logger); 728 return nf_log_bind_pf(pf, &nfulnl_logger);
729 case NFULNL_CFG_CMD_PF_UNBIND: 729 case NFULNL_CFG_CMD_PF_UNBIND:
730 nf_log_unregister_pf(pf); 730 nf_log_unbind_pf(pf);
731 return 0; 731 return 0;
732 } 732 }
733 } 733 }
@@ -952,17 +952,25 @@ static int __init nfnetlink_log_init(void)
952 goto cleanup_netlink_notifier; 952 goto cleanup_netlink_notifier;
953 } 953 }
954 954
955 status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
956 if (status < 0) {
957 printk(KERN_ERR "log: failed to register logger\n");
958 goto cleanup_subsys;
959 }
960
955#ifdef CONFIG_PROC_FS 961#ifdef CONFIG_PROC_FS
956 if (!proc_create("nfnetlink_log", 0440, 962 if (!proc_create("nfnetlink_log", 0440,
957 proc_net_netfilter, &nful_file_ops)) 963 proc_net_netfilter, &nful_file_ops))
958 goto cleanup_subsys; 964 goto cleanup_logger;
959#endif 965#endif
960 return status; 966 return status;
961 967
962#ifdef CONFIG_PROC_FS 968#ifdef CONFIG_PROC_FS
969cleanup_logger:
970 nf_log_unregister(&nfulnl_logger);
971#endif
963cleanup_subsys: 972cleanup_subsys:
964 nfnetlink_subsys_unregister(&nfulnl_subsys); 973 nfnetlink_subsys_unregister(&nfulnl_subsys);
965#endif
966cleanup_netlink_notifier: 974cleanup_netlink_notifier:
967 netlink_unregister_notifier(&nfulnl_rtnl_notifier); 975 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
968 return status; 976 return status;
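
The nfnetlink_log.c changes above rely on the register/bind split introduced earlier in this series: nf_log_register() puts a logger on the per-family logger lists once at module init, while nf_log_bind_pf()/nf_log_unbind_pf() merely select which registered logger is active for one protocol family, so a PF_BIND request from userspace no longer re-registers the module. A rough sketch of the model from a hypothetical logger's point of view; my_logger and its log function are placeholders, not code from this merge:

    static nf_logfn my_log_packet;      /* body omitted in this sketch */

    static struct nf_logger my_logger __read_mostly = {
        .name  = "my_logger",
        .logfn = &my_log_packet,
        .me    = THIS_MODULE,
    };

    static int __init my_logger_init(void)
    {
        int ret;

        /* Register once for all families... */
        ret = nf_log_register(NFPROTO_UNSPEC, &my_logger);
        if (ret < 0)
            return ret;
        /* ...then make it the active logger for IPv4 only. */
        return nf_log_bind_pf(NFPROTO_IPV4, &my_logger);
    }
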
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 5baccfa5a0de..509a95621f9f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -625,6 +625,20 @@ void xt_free_table_info(struct xt_table_info *info)
625} 625}
626EXPORT_SYMBOL(xt_free_table_info); 626EXPORT_SYMBOL(xt_free_table_info);
627 627
628void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo,
629 struct xt_table_info *newinfo)
630{
631 unsigned int cpu;
632
633 for_each_possible_cpu(cpu) {
634 void *p = oldinfo->entries[cpu];
635 rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]);
636 newinfo->entries[cpu] = p;
637 }
638
639}
640EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu);
641
628/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ 642/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
629struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 643struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
630 const char *name) 644 const char *name)
@@ -671,21 +685,22 @@ xt_replace_table(struct xt_table *table,
671 struct xt_table_info *oldinfo, *private; 685 struct xt_table_info *oldinfo, *private;
672 686
673 /* Do the substitution. */ 687 /* Do the substitution. */
674 write_lock_bh(&table->lock); 688 mutex_lock(&table->lock);
675 private = table->private; 689 private = table->private;
676 /* Check inside lock: is the old number correct? */ 690 /* Check inside lock: is the old number correct? */
677 if (num_counters != private->number) { 691 if (num_counters != private->number) {
678 duprintf("num_counters != table->private->number (%u/%u)\n", 692 duprintf("num_counters != table->private->number (%u/%u)\n",
679 num_counters, private->number); 693 num_counters, private->number);
680 write_unlock_bh(&table->lock); 694 mutex_unlock(&table->lock);
681 *error = -EAGAIN; 695 *error = -EAGAIN;
682 return NULL; 696 return NULL;
683 } 697 }
684 oldinfo = private; 698 oldinfo = private;
685 table->private = newinfo; 699 rcu_assign_pointer(table->private, newinfo);
686 newinfo->initial_entries = oldinfo->initial_entries; 700 newinfo->initial_entries = oldinfo->initial_entries;
687 write_unlock_bh(&table->lock); 701 mutex_unlock(&table->lock);
688 702
703 synchronize_net();
689 return oldinfo; 704 return oldinfo;
690} 705}
691EXPORT_SYMBOL_GPL(xt_replace_table); 706EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -719,7 +734,8 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
719 734
720 /* Simplifies replace_table code. */ 735 /* Simplifies replace_table code. */
721 table->private = bootstrap; 736 table->private = bootstrap;
722 rwlock_init(&table->lock); 737 mutex_init(&table->lock);
738
723 if (!xt_replace_table(table, 0, newinfo, &ret)) 739 if (!xt_replace_table(table, 0, newinfo, &ret))
724 goto unlock; 740 goto unlock;
725 741
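
xt_replace_table() above now publishes the new ruleset with rcu_assign_pointer() and waits out readers with synchronize_net(), and xt_table_entry_swap_rcu() swaps the per-CPU entry blobs the same way. The matching reader side (which ipt_do_table()/ip6t_do_table() adopt elsewhere in this merge) drops the old read-write lock in favour of an RCU read section; a condensed, hedged sketch with the actual rule walk elided:

    static unsigned int example_do_table(struct xt_table *table)
    {
        const struct xt_table_info *private;
        const void *table_base;

        rcu_read_lock_bh();
        private    = rcu_dereference(table->private);
        table_base = private->entries[smp_processor_id()];
        /* ... walk the rule blob at table_base, bump its counters ... */
        rcu_read_unlock_bh();

        return table_base != NULL ? NF_ACCEPT : NF_DROP;
    }
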
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
new file mode 100644
index 000000000000..10e789e2d12a
--- /dev/null
+++ b/net/netfilter/xt_HL.c
@@ -0,0 +1,171 @@
1/*
2 * TTL modification target for IP tables
3 * (C) 2000,2005 by Harald Welte <laforge@netfilter.org>
4 *
5 * Hop Limit modification target for ip6tables
6 * Maciej Soltysiak <solt@dns.toxicfilms.tv>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/ip.h>
16#include <linux/ipv6.h>
17#include <net/checksum.h>
18
19#include <linux/netfilter/x_tables.h>
20#include <linux/netfilter_ipv4/ipt_TTL.h>
21#include <linux/netfilter_ipv6/ip6t_HL.h>
22
23MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
24MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
25MODULE_DESCRIPTION("Xtables: Hoplimit/TTL Limit field modification target");
26MODULE_LICENSE("GPL");
27
28static unsigned int
29ttl_tg(struct sk_buff *skb, const struct xt_target_param *par)
30{
31 struct iphdr *iph;
32 const struct ipt_TTL_info *info = par->targinfo;
33 int new_ttl;
34
35 if (!skb_make_writable(skb, skb->len))
36 return NF_DROP;
37
38 iph = ip_hdr(skb);
39
40 switch (info->mode) {
41 case IPT_TTL_SET:
42 new_ttl = info->ttl;
43 break;
44 case IPT_TTL_INC:
45 new_ttl = iph->ttl + info->ttl;
46 if (new_ttl > 255)
47 new_ttl = 255;
48 break;
49 case IPT_TTL_DEC:
50 new_ttl = iph->ttl - info->ttl;
51 if (new_ttl < 0)
52 new_ttl = 0;
53 break;
54 default:
55 new_ttl = iph->ttl;
56 break;
57 }
58
59 if (new_ttl != iph->ttl) {
60 csum_replace2(&iph->check, htons(iph->ttl << 8),
61 htons(new_ttl << 8));
62 iph->ttl = new_ttl;
63 }
64
65 return XT_CONTINUE;
66}
67
68static unsigned int
69hl_tg6(struct sk_buff *skb, const struct xt_target_param *par)
70{
71 struct ipv6hdr *ip6h;
72 const struct ip6t_HL_info *info = par->targinfo;
73 int new_hl;
74
75 if (!skb_make_writable(skb, skb->len))
76 return NF_DROP;
77
78 ip6h = ipv6_hdr(skb);
79
80 switch (info->mode) {
81 case IP6T_HL_SET:
82 new_hl = info->hop_limit;
83 break;
84 case IP6T_HL_INC:
85 new_hl = ip6h->hop_limit + info->hop_limit;
86 if (new_hl > 255)
87 new_hl = 255;
88 break;
89 case IP6T_HL_DEC:
90 new_hl = ip6h->hop_limit - info->hop_limit;
91 if (new_hl < 0)
92 new_hl = 0;
93 break;
94 default:
95 new_hl = ip6h->hop_limit;
96 break;
97 }
98
99 ip6h->hop_limit = new_hl;
100
101 return XT_CONTINUE;
102}
103
104static bool ttl_tg_check(const struct xt_tgchk_param *par)
105{
106 const struct ipt_TTL_info *info = par->targinfo;
107
108 if (info->mode > IPT_TTL_MAXMODE) {
109 printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n",
110 info->mode);
111 return false;
112 }
113 if (info->mode != IPT_TTL_SET && info->ttl == 0)
114 return false;
115 return true;
116}
117
118static bool hl_tg6_check(const struct xt_tgchk_param *par)
119{
120 const struct ip6t_HL_info *info = par->targinfo;
121
122 if (info->mode > IP6T_HL_MAXMODE) {
123 printk(KERN_WARNING "ip6t_HL: invalid or unknown Mode %u\n",
124 info->mode);
125 return false;
126 }
127 if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
128 printk(KERN_WARNING "ip6t_HL: increment/decrement doesn't "
129 "make sense with value 0\n");
130 return false;
131 }
132 return true;
133}
134
135static struct xt_target hl_tg_reg[] __read_mostly = {
136 {
137 .name = "TTL",
138 .revision = 0,
139 .family = NFPROTO_IPV4,
140 .target = ttl_tg,
141 .targetsize = sizeof(struct ipt_TTL_info),
142 .table = "mangle",
143 .checkentry = ttl_tg_check,
144 .me = THIS_MODULE,
145 },
146 {
147 .name = "HL",
148 .revision = 0,
149 .family = NFPROTO_IPV6,
150 .target = hl_tg6,
151 .targetsize = sizeof(struct ip6t_HL_info),
152 .table = "mangle",
153 .checkentry = hl_tg6_check,
154 .me = THIS_MODULE,
155 },
156};
157
158static int __init hl_tg_init(void)
159{
160 return xt_register_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg));
161}
162
163static void __exit hl_tg_exit(void)
164{
165 xt_unregister_targets(hl_tg_reg, ARRAY_SIZE(hl_tg_reg));
166}
167
168module_init(hl_tg_init);
169module_exit(hl_tg_exit);
170MODULE_ALIAS("ipt_TTL");
171MODULE_ALIAS("ip6t_HL");
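
A note on the checksum update in ttl_tg() above: in the IPv4 header the TTL byte shares a 16-bit word with the protocol byte, with TTL in the high byte, which is why both the old and new values are passed as htons(value << 8). The same incremental fix-up works for any 8-bit field sitting in the high byte of a checksummed word; a small hedged helper to illustrate, not part of the patch:

    static void example_fix_high_byte(__sum16 *check, u8 old_val, u8 new_val)
    {
        /* csum_replace2() patches a 16-bit one's-complement checksum when a
         * single 16-bit word of the covered data changes from 'from' to 'to'. */
        csum_replace2(check, htons((__u16)old_val << 8),
                      htons((__u16)new_val << 8));
    }
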
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
new file mode 100644
index 000000000000..8ff7843bb921
--- /dev/null
+++ b/net/netfilter/xt_LED.c
@@ -0,0 +1,161 @@
1/*
2 * xt_LED.c - netfilter target to make LEDs blink upon packet matches
3 *
4 * Copyright (C) 2008 Adam Nielsen <a.nielsen@shikadi.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA.
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/skbuff.h>
24#include <linux/netfilter/x_tables.h>
25#include <linux/leds.h>
26#include <linux/mutex.h>
27
28#include <linux/netfilter/xt_LED.h>
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
32MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
33
34/*
35 * This is declared in here (the kernel module) only, to avoid having these
36 * dependencies in userspace code. This is what xt_led_info.internal_data
37 * points to.
38 */
39struct xt_led_info_internal {
40 struct led_trigger netfilter_led_trigger;
41 struct timer_list timer;
42};
43
44static unsigned int
45led_tg(struct sk_buff *skb, const struct xt_target_param *par)
46{
47 const struct xt_led_info *ledinfo = par->targinfo;
48 struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
49
50 /*
51 * If "always blink" is enabled, and there's still some time until the
52 * LED will switch off, briefly switch it off now.
53 */
54 if ((ledinfo->delay > 0) && ledinfo->always_blink &&
55 timer_pending(&ledinternal->timer))
56 led_trigger_event(&ledinternal->netfilter_led_trigger,LED_OFF);
57
58 led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
59
60 /* If there's a positive delay, start/update the timer */
61 if (ledinfo->delay > 0) {
62 mod_timer(&ledinternal->timer,
63 jiffies + msecs_to_jiffies(ledinfo->delay));
64
65 /* Otherwise if there was no delay given, blink as fast as possible */
66 } else if (ledinfo->delay == 0) {
67 led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
68 }
69
70 /* else the delay is negative, which means switch on and stay on */
71
72 return XT_CONTINUE;
73}
74
75static void led_timeout_callback(unsigned long data)
76{
77 struct xt_led_info *ledinfo = (struct xt_led_info *)data;
78 struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
79
80 led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
81}
82
83static bool led_tg_check(const struct xt_tgchk_param *par)
84{
85 struct xt_led_info *ledinfo = par->targinfo;
86 struct xt_led_info_internal *ledinternal;
87 int err;
88
89 if (ledinfo->id[0] == '\0') {
90 printk(KERN_ERR KBUILD_MODNAME ": No 'id' parameter given.\n");
91 return false;
92 }
93
94 ledinternal = kzalloc(sizeof(struct xt_led_info_internal), GFP_KERNEL);
95 if (!ledinternal) {
96 printk(KERN_CRIT KBUILD_MODNAME ": out of memory\n");
97 return false;
98 }
99
100 ledinternal->netfilter_led_trigger.name = ledinfo->id;
101
102 err = led_trigger_register(&ledinternal->netfilter_led_trigger);
103 if (err) {
104 printk(KERN_CRIT KBUILD_MODNAME
105 ": led_trigger_register() failed\n");
106 if (err == -EEXIST)
107 printk(KERN_ERR KBUILD_MODNAME
108 ": Trigger name is already in use.\n");
109 goto exit_alloc;
110 }
111
112 /* See if we need to set up a timer */
113 if (ledinfo->delay > 0)
114 setup_timer(&ledinternal->timer, led_timeout_callback,
115 (unsigned long)ledinfo);
116
117 ledinfo->internal_data = ledinternal;
118
119 return true;
120
121exit_alloc:
122 kfree(ledinternal);
123
124 return false;
125}
126
127static void led_tg_destroy(const struct xt_tgdtor_param *par)
128{
129 const struct xt_led_info *ledinfo = par->targinfo;
130 struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
131
132 if (ledinfo->delay > 0)
133 del_timer_sync(&ledinternal->timer);
134
135 led_trigger_unregister(&ledinternal->netfilter_led_trigger);
136 kfree(ledinternal);
137}
138
139static struct xt_target led_tg_reg __read_mostly = {
140 .name = "LED",
141 .revision = 0,
142 .family = NFPROTO_UNSPEC,
143 .target = led_tg,
144 .targetsize = XT_ALIGN(sizeof(struct xt_led_info)),
145 .checkentry = led_tg_check,
146 .destroy = led_tg_destroy,
147 .me = THIS_MODULE,
148};
149
150static int __init led_tg_init(void)
151{
152 return xt_register_target(&led_tg_reg);
153}
154
155static void __exit led_tg_exit(void)
156{
157 xt_unregister_target(&led_tg_reg);
158}
159
160module_init(led_tg_init);
161module_exit(led_tg_exit);
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
new file mode 100644
index 000000000000..ad5bd890e4e8
--- /dev/null
+++ b/net/netfilter/xt_cluster.c
@@ -0,0 +1,164 @@
1/*
2 * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/module.h>
9#include <linux/skbuff.h>
10#include <linux/jhash.h>
11#include <linux/ip.h>
12#include <net/ipv6.h>
13
14#include <linux/netfilter/x_tables.h>
15#include <net/netfilter/nf_conntrack.h>
16#include <linux/netfilter/xt_cluster.h>
17
18static inline u_int32_t nf_ct_orig_ipv4_src(const struct nf_conn *ct)
19{
20 return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
21}
22
23static inline const void *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
24{
25 return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
26}
27
28static inline u_int32_t
29xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info)
30{
31 return jhash_1word(ip, info->hash_seed);
32}
33
34static inline u_int32_t
35xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info)
36{
37 return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed);
38}
39
40static inline u_int32_t
41xt_cluster_hash(const struct nf_conn *ct,
42 const struct xt_cluster_match_info *info)
43{
44 u_int32_t hash = 0;
45
46 switch(nf_ct_l3num(ct)) {
47 case AF_INET:
48 hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info);
49 break;
50 case AF_INET6:
51 hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info);
52 break;
53 default:
54 WARN_ON(1);
55 break;
56 }
57 return (((u64)hash * info->total_nodes) >> 32);
58}
59
60static inline bool
61xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
62{
63 bool is_multicast = false;
64
65 switch(family) {
66 case NFPROTO_IPV4:
67 is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr);
68 break;
69 case NFPROTO_IPV6:
70 is_multicast = ipv6_addr_type(&ipv6_hdr(skb)->daddr) &
71 IPV6_ADDR_MULTICAST;
72 break;
73 default:
74 WARN_ON(1);
75 break;
76 }
77 return is_multicast;
78}
79
80static bool
81xt_cluster_mt(const struct sk_buff *skb, const struct xt_match_param *par)
82{
83 struct sk_buff *pskb = (struct sk_buff *)skb;
84 const struct xt_cluster_match_info *info = par->matchinfo;
85 const struct nf_conn *ct;
86 enum ip_conntrack_info ctinfo;
87 unsigned long hash;
88
 89	/* This match assumes that all nodes see the same packets. This can be
 90	 * achieved if the switch that connects the cluster nodes supports some
 91	 * sort of 'port mirroring'. However, if your switch does not support
 92	 * this, your cluster nodes can reply to ARP requests using a multicast
 93	 * MAC address. Thus, your switch will flood the same packets to all
 94	 * cluster nodes sharing that multicast MAC address. Using a multicast
 95	 * link address is an RFC 1812 (section 3.3.2) violation, but this works
 96	 * fine in practice.
 97	 *
 98	 * Unfortunately, if you use the multicast MAC address, the link layer
 99	 * sets the skbuff's pkt_type to PACKET_MULTICAST, which is not accepted
 100	 * by TCP and others for packets destined to this node. For that reason,
 101	 * this match mangles the skbuff's pkt_type if it detects a packet
 102	 * addressed to a unicast address but arriving as PACKET_MULTICAST. Yes,
 103	 * matches should not alter packets, but we do it here because the only
 104	 * alternative would be adding a PKTTYPE target for this sole purpose.
 105	 */
106 if (!xt_cluster_is_multicast_addr(skb, par->family) &&
107 skb->pkt_type == PACKET_MULTICAST) {
108 pskb->pkt_type = PACKET_HOST;
109 }
110
111 ct = nf_ct_get(skb, &ctinfo);
112 if (ct == NULL)
113 return false;
114
115 if (ct == &nf_conntrack_untracked)
116 return false;
117
118 if (ct->master)
119 hash = xt_cluster_hash(ct->master, info);
120 else
121 hash = xt_cluster_hash(ct, info);
122
123 return !!((1 << hash) & info->node_mask) ^
124 !!(info->flags & XT_CLUSTER_F_INV);
125}
126
127static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
128{
129 struct xt_cluster_match_info *info = par->matchinfo;
130
131 if (info->node_mask >= (1 << info->total_nodes)) {
132 printk(KERN_ERR "xt_cluster: this node mask cannot be "
133 "higher than the total number of nodes\n");
134 return false;
135 }
136 return true;
137}
138
139static struct xt_match xt_cluster_match __read_mostly = {
140 .name = "cluster",
141 .family = NFPROTO_UNSPEC,
142 .match = xt_cluster_mt,
143 .checkentry = xt_cluster_mt_checkentry,
144 .matchsize = sizeof(struct xt_cluster_match_info),
145 .me = THIS_MODULE,
146};
147
148static int __init xt_cluster_mt_init(void)
149{
150 return xt_register_match(&xt_cluster_match);
151}
152
153static void __exit xt_cluster_mt_fini(void)
154{
155 xt_unregister_match(&xt_cluster_match);
156}
157
158MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
159MODULE_LICENSE("GPL");
160MODULE_DESCRIPTION("Xtables: hash-based cluster match");
161MODULE_ALIAS("ipt_cluster");
162MODULE_ALIAS("ip6t_cluster");
163module_init(xt_cluster_mt_init);
164module_exit(xt_cluster_mt_fini);
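
The node selection in xt_cluster_hash() above scales the 32-bit jhash value into the range [0, total_nodes) with a multiply-and-shift instead of a modulo, and xt_cluster_mt() then checks the resulting node id against this machine's node_mask. A small worked sketch with invented numbers:

    static bool example_is_my_packet(u_int32_t hash, u_int32_t total_nodes,
                                     u_int32_t node_mask)
    {
        /* e.g. hash = 0xC0000000, total_nodes = 4  ->  node = 3,
         * so only the machine with bit 3 set in node_mask accepts it. */
        u_int32_t node = ((u64)hash * total_nodes) >> 32;

        return (1 << node) & node_mask;
    }
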
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index f97fded024c4..a5b5369c30f9 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -149,7 +149,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
149 /* initialize hash with random val at the time we allocate 149 /* initialize hash with random val at the time we allocate
150 * the first hashtable entry */ 150 * the first hashtable entry */
151 if (!ht->rnd_initialized) { 151 if (!ht->rnd_initialized) {
152 get_random_bytes(&ht->rnd, 4); 152 get_random_bytes(&ht->rnd, sizeof(ht->rnd));
153 ht->rnd_initialized = 1; 153 ht->rnd_initialized = 1;
154 } 154 }
155 155
@@ -565,8 +565,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
565static bool 565static bool
566hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) 566hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
567{ 567{
568 const struct xt_hashlimit_info *r = 568 const struct xt_hashlimit_info *r = par->matchinfo;
569 ((const struct xt_hashlimit_info *)par->matchinfo)->u.master;
570 struct xt_hashlimit_htable *hinfo = r->hinfo; 569 struct xt_hashlimit_htable *hinfo = r->hinfo;
571 unsigned long now = jiffies; 570 unsigned long now = jiffies;
572 struct dsthash_ent *dh; 571 struct dsthash_ent *dh;
@@ -702,8 +701,6 @@ static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
702 } 701 }
703 mutex_unlock(&hlimit_mutex); 702 mutex_unlock(&hlimit_mutex);
704 703
705 /* Ugly hack: For SMP, we only want to use one set */
706 r->u.master = r;
707 return true; 704 return true;
708} 705}
709 706
diff --git a/net/netfilter/xt_hl.c b/net/netfilter/xt_hl.c
new file mode 100644
index 000000000000..7726154c87b2
--- /dev/null
+++ b/net/netfilter/xt_hl.c
@@ -0,0 +1,108 @@
1/*
2 * IP tables module for matching the value of the TTL
3 * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
4 *
5 * Hop Limit matching module
6 * (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/ip.h>
14#include <linux/ipv6.h>
15#include <linux/module.h>
16#include <linux/skbuff.h>
17
18#include <linux/netfilter/x_tables.h>
19#include <linux/netfilter_ipv4/ipt_ttl.h>
20#include <linux/netfilter_ipv6/ip6t_hl.h>
21
22MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
23MODULE_DESCRIPTION("Xtables: Hoplimit/TTL field match");
24MODULE_LICENSE("GPL");
25MODULE_ALIAS("ipt_ttl");
26MODULE_ALIAS("ip6t_hl");
27
28static bool ttl_mt(const struct sk_buff *skb, const struct xt_match_param *par)
29{
30 const struct ipt_ttl_info *info = par->matchinfo;
31 const u8 ttl = ip_hdr(skb)->ttl;
32
33 switch (info->mode) {
34 case IPT_TTL_EQ:
35 return ttl == info->ttl;
36 case IPT_TTL_NE:
37 return ttl != info->ttl;
38 case IPT_TTL_LT:
39 return ttl < info->ttl;
40 case IPT_TTL_GT:
41 return ttl > info->ttl;
42 default:
43 printk(KERN_WARNING "ipt_ttl: unknown mode %d\n",
44 info->mode);
45 return false;
46 }
47
48 return false;
49}
50
51static bool hl_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
52{
53 const struct ip6t_hl_info *info = par->matchinfo;
54 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
55
56 switch (info->mode) {
57 case IP6T_HL_EQ:
58 return ip6h->hop_limit == info->hop_limit;
59 break;
60 case IP6T_HL_NE:
61 return ip6h->hop_limit != info->hop_limit;
62 break;
63 case IP6T_HL_LT:
64 return ip6h->hop_limit < info->hop_limit;
65 break;
66 case IP6T_HL_GT:
67 return ip6h->hop_limit > info->hop_limit;
68 break;
69 default:
70 printk(KERN_WARNING "ip6t_hl: unknown mode %d\n",
71 info->mode);
72 return false;
73 }
74
75 return false;
76}
77
78static struct xt_match hl_mt_reg[] __read_mostly = {
79 {
80 .name = "ttl",
81 .revision = 0,
82 .family = NFPROTO_IPV4,
83 .match = ttl_mt,
84 .matchsize = sizeof(struct ipt_ttl_info),
85 .me = THIS_MODULE,
86 },
87 {
88 .name = "hl",
89 .revision = 0,
90 .family = NFPROTO_IPV6,
91 .match = hl_mt6,
92 .matchsize = sizeof(struct ip6t_hl_info),
93 .me = THIS_MODULE,
94 },
95};
96
97static int __init hl_mt_init(void)
98{
99 return xt_register_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
100}
101
102static void __exit hl_mt_exit(void)
103{
104 xt_unregister_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
105}
106
107module_init(hl_mt_init);
108module_exit(hl_mt_exit);
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index c908d69a5595..2e8089ecd0af 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -14,6 +14,11 @@
14#include <linux/netfilter/x_tables.h> 14#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter/xt_limit.h> 15#include <linux/netfilter/xt_limit.h>
16 16
17struct xt_limit_priv {
18 unsigned long prev;
19 uint32_t credit;
20};
21
17MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
18MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>"); 23MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
19MODULE_DESCRIPTION("Xtables: rate-limit match"); 24MODULE_DESCRIPTION("Xtables: rate-limit match");
@@ -60,18 +65,18 @@ static DEFINE_SPINLOCK(limit_lock);
60static bool 65static bool
61limit_mt(const struct sk_buff *skb, const struct xt_match_param *par) 66limit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
62{ 67{
63 struct xt_rateinfo *r = 68 const struct xt_rateinfo *r = par->matchinfo;
64 ((const struct xt_rateinfo *)par->matchinfo)->master; 69 struct xt_limit_priv *priv = r->master;
65 unsigned long now = jiffies; 70 unsigned long now = jiffies;
66 71
67 spin_lock_bh(&limit_lock); 72 spin_lock_bh(&limit_lock);
68 r->credit += (now - xchg(&r->prev, now)) * CREDITS_PER_JIFFY; 73 priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
69 if (r->credit > r->credit_cap) 74 if (priv->credit > r->credit_cap)
70 r->credit = r->credit_cap; 75 priv->credit = r->credit_cap;
71 76
72 if (r->credit >= r->cost) { 77 if (priv->credit >= r->cost) {
73 /* We're not limited. */ 78 /* We're not limited. */
74 r->credit -= r->cost; 79 priv->credit -= r->cost;
75 spin_unlock_bh(&limit_lock); 80 spin_unlock_bh(&limit_lock);
76 return true; 81 return true;
77 } 82 }
@@ -95,6 +100,7 @@ user2credits(u_int32_t user)
95static bool limit_mt_check(const struct xt_mtchk_param *par) 100static bool limit_mt_check(const struct xt_mtchk_param *par)
96{ 101{
97 struct xt_rateinfo *r = par->matchinfo; 102 struct xt_rateinfo *r = par->matchinfo;
103 struct xt_limit_priv *priv;
98 104
99 /* Check for overflow. */ 105 /* Check for overflow. */
100 if (r->burst == 0 106 if (r->burst == 0
@@ -104,19 +110,30 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
104 return false; 110 return false;
105 } 111 }
106 112
107 /* For SMP, we only want to use one set of counters. */ 113 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
108 r->master = r; 114 if (priv == NULL)
 115	return false;
116
117 /* For SMP, we only want to use one set of state. */
118 r->master = priv;
109 if (r->cost == 0) { 119 if (r->cost == 0) {
110 /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * 120 /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
111 128. */ 121 128. */
112 r->prev = jiffies; 122 priv->prev = jiffies;
113 r->credit = user2credits(r->avg * r->burst); /* Credits full. */ 123 priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
114 r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ 124 r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
115 r->cost = user2credits(r->avg); 125 r->cost = user2credits(r->avg);
116 } 126 }
117 return true; 127 return true;
118} 128}
119 129
130static void limit_mt_destroy(const struct xt_mtdtor_param *par)
131{
132 const struct xt_rateinfo *info = par->matchinfo;
133
134 kfree(info->master);
135}
136
120#ifdef CONFIG_COMPAT 137#ifdef CONFIG_COMPAT
121struct compat_xt_rateinfo { 138struct compat_xt_rateinfo {
122 u_int32_t avg; 139 u_int32_t avg;
@@ -167,6 +184,7 @@ static struct xt_match limit_mt_reg __read_mostly = {
167 .family = NFPROTO_UNSPEC, 184 .family = NFPROTO_UNSPEC,
168 .match = limit_mt, 185 .match = limit_mt,
169 .checkentry = limit_mt_check, 186 .checkentry = limit_mt_check,
187 .destroy = limit_mt_destroy,
170 .matchsize = sizeof(struct xt_rateinfo), 188 .matchsize = sizeof(struct xt_rateinfo),
171#ifdef CONFIG_COMPAT 189#ifdef CONFIG_COMPAT
172 .compatsize = sizeof(struct compat_xt_rateinfo), 190 .compatsize = sizeof(struct compat_xt_rateinfo),
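
The xt_limit conversion above is one instance of a pattern repeated for xt_quota and xt_statistic below: mutable per-rule state is moved out of the matchinfo blob (which is copied from, and compared against, userspace) into a kernel-private allocation made in checkentry and released in destroy. A generic, hedged sketch of the pattern with invented names:

    struct example_priv {
        unsigned long state;            /* kernel-only mutable state */
    };

    struct example_match_info {
        u_int32_t threshold;            /* userspace-visible configuration */
        struct example_priv *master;    /* filled in by checkentry */
    };

    static bool example_mt_check(const struct xt_mtchk_param *par)
    {
        struct example_match_info *info = par->matchinfo;

        info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
        if (info->master == NULL)
            return false;               /* checkentry returns bool in this tree */
        return true;
    }

    static void example_mt_destroy(const struct xt_mtdtor_param *par)
    {
        const struct example_match_info *info = par->matchinfo;

        kfree(info->master);
    }
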
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 1bcdfc12cf59..44a234ef4439 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -20,13 +20,30 @@ MODULE_DESCRIPTION("Xtables: Bridge physical device match");
20MODULE_ALIAS("ipt_physdev"); 20MODULE_ALIAS("ipt_physdev");
21MODULE_ALIAS("ip6t_physdev"); 21MODULE_ALIAS("ip6t_physdev");
22 22
23static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
24{
25 const unsigned long *a = (const unsigned long *)_a;
26 const unsigned long *b = (const unsigned long *)_b;
27 const unsigned long *mask = (const unsigned long *)_mask;
28 unsigned long ret;
29
30 ret = (a[0] ^ b[0]) & mask[0];
31 if (IFNAMSIZ > sizeof(unsigned long))
32 ret |= (a[1] ^ b[1]) & mask[1];
33 if (IFNAMSIZ > 2 * sizeof(unsigned long))
34 ret |= (a[2] ^ b[2]) & mask[2];
35 if (IFNAMSIZ > 3 * sizeof(unsigned long))
36 ret |= (a[3] ^ b[3]) & mask[3];
37 BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
38 return ret;
39}
40
23static bool 41static bool
24physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par) 42physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
25{ 43{
26 int i; 44 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
27 static const char nulldevname[IFNAMSIZ];
28 const struct xt_physdev_info *info = par->matchinfo; 45 const struct xt_physdev_info *info = par->matchinfo;
29 bool ret; 46 unsigned long ret;
30 const char *indev, *outdev; 47 const char *indev, *outdev;
31 const struct nf_bridge_info *nf_bridge; 48 const struct nf_bridge_info *nf_bridge;
32 49
@@ -68,11 +85,7 @@ physdev_mt(const struct sk_buff *skb, const struct xt_match_param *par)
68 if (!(info->bitmask & XT_PHYSDEV_OP_IN)) 85 if (!(info->bitmask & XT_PHYSDEV_OP_IN))
69 goto match_outdev; 86 goto match_outdev;
70 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname; 87 indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
71 for (i = 0, ret = false; i < IFNAMSIZ/sizeof(unsigned int); i++) { 88 ret = ifname_compare(indev, info->physindev, info->in_mask);
72 ret |= (((const unsigned int *)indev)[i]
73 ^ ((const unsigned int *)info->physindev)[i])
74 & ((const unsigned int *)info->in_mask)[i];
75 }
76 89
77 if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN)) 90 if (!ret ^ !(info->invert & XT_PHYSDEV_OP_IN))
78 return false; 91 return false;
@@ -82,13 +95,9 @@ match_outdev:
82 return true; 95 return true;
83 outdev = nf_bridge->physoutdev ? 96 outdev = nf_bridge->physoutdev ?
84 nf_bridge->physoutdev->name : nulldevname; 97 nf_bridge->physoutdev->name : nulldevname;
85 for (i = 0, ret = false; i < IFNAMSIZ/sizeof(unsigned int); i++) { 98 ret = ifname_compare(outdev, info->physoutdev, info->out_mask);
86 ret |= (((const unsigned int *)outdev)[i]
87 ^ ((const unsigned int *)info->physoutdev)[i])
88 & ((const unsigned int *)info->out_mask)[i];
89 }
90 99
91 return ret ^ !(info->invert & XT_PHYSDEV_OP_OUT); 100 return (!!ret ^ !(info->invert & XT_PHYSDEV_OP_OUT));
92} 101}
93 102
94static bool physdev_mt_check(const struct xt_mtchk_param *par) 103static bool physdev_mt_check(const struct xt_mtchk_param *par)
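
ifname_compare() above XORs the two interface names one unsigned long at a time under a mask, so any nonzero result means a mismatch on a byte the mask cares about; both buffers must be long-aligned and IFNAMSIZ-padded, which the BUILD_BUG_ON and the aligned nulldevname guarantee. A small hedged illustration of how a comparison in that style behaves; the buffer contents are examples only:

    static bool example_names_match(void)
    {
        char dev[IFNAMSIZ]  __attribute__((aligned(sizeof(long)))) = "eth0";
        char rule[IFNAMSIZ] __attribute__((aligned(sizeof(long)))) = "eth0";
        char mask[IFNAMSIZ] __attribute__((aligned(sizeof(long))));

        memset(mask, 0xff, 5);                  /* match "eth0" plus its NUL */
        memset(mask + 5, 0x00, IFNAMSIZ - 5);   /* ignore the trailing bytes */

        return ifname_compare(dev, rule, mask) == 0;    /* true: names match */
    }
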
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index c84fce5e0f3e..01dd07b764ec 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -9,6 +9,10 @@
9#include <linux/netfilter/x_tables.h> 9#include <linux/netfilter/x_tables.h>
10#include <linux/netfilter/xt_quota.h> 10#include <linux/netfilter/xt_quota.h>
11 11
12struct xt_quota_priv {
13 uint64_t quota;
14};
15
12MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
13MODULE_AUTHOR("Sam Johnston <samj@samj.net>"); 17MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
14MODULE_DESCRIPTION("Xtables: countdown quota match"); 18MODULE_DESCRIPTION("Xtables: countdown quota match");
@@ -20,18 +24,20 @@ static DEFINE_SPINLOCK(quota_lock);
20static bool 24static bool
21quota_mt(const struct sk_buff *skb, const struct xt_match_param *par) 25quota_mt(const struct sk_buff *skb, const struct xt_match_param *par)
22{ 26{
23 struct xt_quota_info *q = 27 struct xt_quota_info *q = (void *)par->matchinfo;
24 ((const struct xt_quota_info *)par->matchinfo)->master; 28 struct xt_quota_priv *priv = q->master;
25 bool ret = q->flags & XT_QUOTA_INVERT; 29 bool ret = q->flags & XT_QUOTA_INVERT;
26 30
27 spin_lock_bh(&quota_lock); 31 spin_lock_bh(&quota_lock);
28 if (q->quota >= skb->len) { 32 if (priv->quota >= skb->len) {
29 q->quota -= skb->len; 33 priv->quota -= skb->len;
30 ret = !ret; 34 ret = !ret;
31 } else { 35 } else {
32 /* we do not allow even small packets from now on */ 36 /* we do not allow even small packets from now on */
33 q->quota = 0; 37 priv->quota = 0;
34 } 38 }
39 /* Copy quota back to matchinfo so that iptables can display it */
40 q->quota = priv->quota;
35 spin_unlock_bh(&quota_lock); 41 spin_unlock_bh(&quota_lock);
36 42
37 return ret; 43 return ret;
@@ -43,17 +49,28 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
43 49
44 if (q->flags & ~XT_QUOTA_MASK) 50 if (q->flags & ~XT_QUOTA_MASK)
45 return false; 51 return false;
46 /* For SMP, we only want to use one set of counters. */ 52
47 q->master = q; 53 q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
54 if (q->master == NULL)
 55	return false;
56
48 return true; 57 return true;
49} 58}
50 59
60static void quota_mt_destroy(const struct xt_mtdtor_param *par)
61{
62 const struct xt_quota_info *q = par->matchinfo;
63
64 kfree(q->master);
65}
66
51static struct xt_match quota_mt_reg __read_mostly = { 67static struct xt_match quota_mt_reg __read_mostly = {
52 .name = "quota", 68 .name = "quota",
53 .revision = 0, 69 .revision = 0,
54 .family = NFPROTO_UNSPEC, 70 .family = NFPROTO_UNSPEC,
55 .match = quota_mt, 71 .match = quota_mt,
56 .checkentry = quota_mt_check, 72 .checkentry = quota_mt_check,
73 .destroy = quota_mt_destroy,
57 .matchsize = sizeof(struct xt_quota_info), 74 .matchsize = sizeof(struct xt_quota_info),
58 .me = THIS_MODULE, 75 .me = THIS_MODULE,
59}; 76};
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 0d75141139d5..d8c0f8f1a78e 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -16,6 +16,10 @@
16#include <linux/netfilter/xt_statistic.h> 16#include <linux/netfilter/xt_statistic.h>
17#include <linux/netfilter/x_tables.h> 17#include <linux/netfilter/x_tables.h>
18 18
19struct xt_statistic_priv {
20 uint32_t count;
21};
22
19MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
20MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 24MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
21MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)"); 25MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
@@ -27,7 +31,7 @@ static DEFINE_SPINLOCK(nth_lock);
27static bool 31static bool
28statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par) 32statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par)
29{ 33{
30 struct xt_statistic_info *info = (void *)par->matchinfo; 34 const struct xt_statistic_info *info = par->matchinfo;
31 bool ret = info->flags & XT_STATISTIC_INVERT; 35 bool ret = info->flags & XT_STATISTIC_INVERT;
32 36
33 switch (info->mode) { 37 switch (info->mode) {
@@ -36,10 +40,9 @@ statistic_mt(const struct sk_buff *skb, const struct xt_match_param *par)
36 ret = !ret; 40 ret = !ret;
37 break; 41 break;
38 case XT_STATISTIC_MODE_NTH: 42 case XT_STATISTIC_MODE_NTH:
39 info = info->master;
40 spin_lock_bh(&nth_lock); 43 spin_lock_bh(&nth_lock);
41 if (info->u.nth.count++ == info->u.nth.every) { 44 if (info->master->count++ == info->u.nth.every) {
42 info->u.nth.count = 0; 45 info->master->count = 0;
43 ret = !ret; 46 ret = !ret;
44 } 47 }
45 spin_unlock_bh(&nth_lock); 48 spin_unlock_bh(&nth_lock);
@@ -56,16 +59,31 @@ static bool statistic_mt_check(const struct xt_mtchk_param *par)
56 if (info->mode > XT_STATISTIC_MODE_MAX || 59 if (info->mode > XT_STATISTIC_MODE_MAX ||
57 info->flags & ~XT_STATISTIC_MASK) 60 info->flags & ~XT_STATISTIC_MASK)
58 return false; 61 return false;
59 info->master = info; 62
63 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
64 if (info->master == NULL) {
65 printk(KERN_ERR KBUILD_MODNAME ": Out of memory\n");
66 return false;
67 }
68 info->master->count = info->u.nth.count;
69
60 return true; 70 return true;
61} 71}
62 72
73static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
74{
75 const struct xt_statistic_info *info = par->matchinfo;
76
77 kfree(info->master);
78}
79
63static struct xt_match xt_statistic_mt_reg __read_mostly = { 80static struct xt_match xt_statistic_mt_reg __read_mostly = {
64 .name = "statistic", 81 .name = "statistic",
65 .revision = 0, 82 .revision = 0,
66 .family = NFPROTO_UNSPEC, 83 .family = NFPROTO_UNSPEC,
67 .match = statistic_mt, 84 .match = statistic_mt,
68 .checkentry = statistic_mt_check, 85 .checkentry = statistic_mt_check,
86 .destroy = statistic_mt_destroy,
69 .matchsize = sizeof(struct xt_statistic_info), 87 .matchsize = sizeof(struct xt_statistic_info),
70 .me = THIS_MODULE, 88 .me = THIS_MODULE,
71}; 89};
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a007dbb4c9f1..b73d4e61c5ac 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1117,6 +1117,7 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1117 1117
1118 read_unlock(&nl_table_lock); 1118 read_unlock(&nl_table_lock);
1119} 1119}
1120EXPORT_SYMBOL(netlink_set_err);
1120 1121
1121/* must be called with netlink table grabbed */ 1122/* must be called with netlink table grabbed */
1122static void netlink_update_socket_mc(struct netlink_sock *nlk, 1123static void netlink_update_socket_mc(struct netlink_sock *nlk,
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 972201cd5fa7..0b15d7250c40 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -61,7 +61,7 @@ static struct ctl_table_root net_sysctl_root = {
61static int net_ctl_ro_header_perms(struct ctl_table_root *root, 61static int net_ctl_ro_header_perms(struct ctl_table_root *root,
62 struct nsproxy *namespaces, struct ctl_table *table) 62 struct nsproxy *namespaces, struct ctl_table *table)
63{ 63{
64 if (namespaces->net_ns == &init_net) 64 if (net_eq(namespaces->net_ns, &init_net))
65 return table->mode; 65 return table->mode;
66 else 66 else
67 return table->mode & ~0222; 67 return table->mode & ~0222;
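
The switch to net_eq() in sysctl_net.c matters because the helper compares namespace pointers only when CONFIG_NET_NS is enabled; with namespaces compiled out there is only init_net, so it collapses to a constant and the read-only check cannot misfire. A hedged reconstruction of its shape from include/net/net_namespace.h:

    #ifdef CONFIG_NET_NS
    static inline int net_eq(const struct net *net1, const struct net *net2)
    {
        return net1 == net2;
    }
    #else
    static inline int net_eq(const struct net *net1, const struct net *net2)
    {
        return 1;
    }
    #endif
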