aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-04-04 11:04:03 -0400
committerPatrick McHardy <kaber@trash.net>2011-04-04 11:04:03 -0400
commit7f5c6d4f665bb57a19a34ce1fb16cc708c04f219 (patch)
treee804faa506bbf9edcfd1fdadb2ab3749f58836cd /net/ipv6
parent8f7b01a178b8e6a7b663a1bbaa1710756d67b69b (diff)
netfilter: get rid of atomic ops in fast path
We currently use a percpu spinlock to 'protect' rule bytes/packets counters, after various attempts to use RCU instead. Lately we added a seqlock so that get_counters() can run without blocking BH or 'writers'. But we really only need the seqcount in it. The spinlock itself is only locked by the current/owner cpu, so we can remove it completely. This cleans up the API, using correct 'writer' vs 'reader' semantics. At replace time, the get_counters() call makes sure all cpus are done using the old table. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Jan Engelhardt <jengelh@medozas.de> Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/netfilter/ip6_tables.c19
1 file changed, 12 insertions, 7 deletions
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0b2af9b85cec..ec7cf579cdd4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
340 unsigned int *stackptr, origptr, cpu; 340 unsigned int *stackptr, origptr, cpu;
341 const struct xt_table_info *private; 341 const struct xt_table_info *private;
342 struct xt_action_param acpar; 342 struct xt_action_param acpar;
343 unsigned int addend;
343 344
344 /* Initialization */ 345 /* Initialization */
345 indev = in ? in->name : nulldevname; 346 indev = in ? in->name : nulldevname;
@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
358 359
359 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 360 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
360 361
361 xt_info_rdlock_bh(); 362 local_bh_disable();
363 addend = xt_write_recseq_begin();
362 private = table->private; 364 private = table->private;
363 cpu = smp_processor_id(); 365 cpu = smp_processor_id();
364 table_base = private->entries[cpu]; 366 table_base = private->entries[cpu];
@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
442 } while (!acpar.hotdrop); 444 } while (!acpar.hotdrop);
443 445
444 *stackptr = origptr; 446 *stackptr = origptr;
445 xt_info_rdunlock_bh(); 447
448 xt_write_recseq_end(addend);
449 local_bh_enable();
446 450
447#ifdef DEBUG_ALLOW_ALL 451#ifdef DEBUG_ALLOW_ALL
448 return NF_ACCEPT; 452 return NF_ACCEPT;
@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
899 unsigned int i; 903 unsigned int i;
900 904
901 for_each_possible_cpu(cpu) { 905 for_each_possible_cpu(cpu) {
902 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 906 seqcount_t *s = &per_cpu(xt_recseq, cpu);
903 907
904 i = 0; 908 i = 0;
905 xt_entry_foreach(iter, t->entries[cpu], t->size) { 909 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
907 unsigned int start; 911 unsigned int start;
908 912
909 do { 913 do {
910 start = read_seqbegin(lock); 914 start = read_seqcount_begin(s);
911 bcnt = iter->counters.bcnt; 915 bcnt = iter->counters.bcnt;
912 pcnt = iter->counters.pcnt; 916 pcnt = iter->counters.pcnt;
913 } while (read_seqretry(lock, start)); 917 } while (read_seqcount_retry(s, start));
914 918
915 ADD_COUNTER(counters[i], bcnt, pcnt); 919 ADD_COUNTER(counters[i], bcnt, pcnt);
916 ++i; 920 ++i;
@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1325 int ret = 0; 1329 int ret = 0;
1326 const void *loc_cpu_entry; 1330 const void *loc_cpu_entry;
1327 struct ip6t_entry *iter; 1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1328#ifdef CONFIG_COMPAT 1333#ifdef CONFIG_COMPAT
1329 struct compat_xt_counters_info compat_tmp; 1334 struct compat_xt_counters_info compat_tmp;
1330 1335
@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1381 i = 0; 1386 i = 0;
1382 /* Choose the copy that is on our node */ 1387 /* Choose the copy that is on our node */
1383 curcpu = smp_processor_id(); 1388 curcpu = smp_processor_id();
1384 xt_info_wrlock(curcpu); 1389 addend = xt_write_recseq_begin();
1385 loc_cpu_entry = private->entries[curcpu]; 1390 loc_cpu_entry = private->entries[curcpu];
1386 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1391 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1387 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1392 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1388 ++i; 1393 ++i;
1389 } 1394 }
1390 xt_info_wrunlock(curcpu); 1395 xt_write_recseq_end(addend);
1391 1396
1392 unlock_up_free: 1397 unlock_up_free:
1393 local_bh_enable(); 1398 local_bh_enable();