author	Eric Dumazet <eric.dumazet@gmail.com>	2010-08-02 10:49:01 -0400
committer	Patrick McHardy <kaber@trash.net>	2010-08-02 10:49:01 -0400
commit	24b36f0193467fa727b85b4c004016a8dae999b9 (patch)
tree	d9518ed0ef2012fd14567d03b500295dfb7fe7e0 /net/ipv6
parent	7df0884ce144396fc151f2af7a73d5fb305f9b03 (diff)
netfilter: {ip,ip6,arp}_tables: don't block bottom half more than necessary
We currently disable BH for the whole duration of get_counters(). On machines with a lot of cpus and large tables, this might be too long.

We can disable preemption during the whole function, and disable BH only while fetching counters for the current cpu.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
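The effect of the patch is easier to see reduced to its locking skeleton. The sketch below is illustrative, not the patch itself: get_cpu() keeps preemption disabled for the entire walk, while local_bh_disable()/local_bh_enable() now brackets only the snapshot of the current cpu's own counters. The helpers snapshot_cpu_counters() and add_cpu_counters() are hypothetical stand-ins for the SET_COUNTER/ADD_COUNTER loops shown in the diff below.

static void get_counters_sketch(const struct xt_table_info *t,
				struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int curcpu = get_cpu();	/* disables preemption */

	/* BH must be off only while reading this cpu's live counters:
	 * a softirq arriving here could re-enter ipt_do_table() and
	 * update them under us.
	 */
	local_bh_disable();
	snapshot_cpu_counters(t, curcpu, counters);	/* hypothetical helper */
	local_bh_enable();

	/* Remote cpus' counters are taken under xt_info_wrlock(cpu),
	 * so bottom halves can stay enabled; preemption is still off,
	 * which keeps curcpu stable.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		add_cpu_counters(t, cpu, counters);	/* hypothetical helper */
	}
	put_cpu();	/* re-enables preemption */
}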
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/netfilter/ip6_tables.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 33113c1ea02f..5359ef4daac5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,7 +897,7 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -907,14 +907,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -928,7 +930,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
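In practice this path runs whenever userspace reads rule counters, for example ip6tables -L -n -v or ip6tables-save -c, which reach get_counters() through alloc_counters(); on a machine with many cpus and a large ruleset, that per-cpu walk is exactly where keeping bottom halves disabled throughout had become too long.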