Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/netfilter/arp_tables.c	10
-rw-r--r--	net/ipv4/netfilter/ip_tables.c	10
2 files changed, 12 insertions, 8 deletions
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c868dd53e432..6bccba31d132 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -720,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -741,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 3c584a6765b0..c439721b165a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
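
For reference, below is a condensed sketch of get_counters() as it reads after this patch, reconstructed from the ip_tables.c hunks above. The region between the second and third hunks is not part of the diff; the per-cpu write-lock loop (xt_info_wrlock()/xt_info_wrunlock() around ADD_COUNTER) is filled in from the kernel source of the same era, so treat it as an approximation rather than the verbatim file contents.

/* Sketch of get_counters() after the patch (ip_tables.c).
 * The middle of the for_each_possible_cpu() loop is reconstructed
 * from contemporary kernel sources, not shown in the diff above.
 */
static void
get_counters(const struct xt_table_info *t, struct xt_counters *counters)
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu = get_cpu();	/* disables preemption, pins us to curcpu */

	/* Snapshot the local cpu's counters with bottom halves disabled,
	 * so ipt_do_table() cannot run underneath us on this cpu.
	 */
	local_bh_disable();
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}
	local_bh_enable();
	/* Processing counters from other cpus, we can let bottom half
	 * enabled (preemption is disabled): each cpu's write lock
	 * serializes us against that cpu's updaters instead.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);	/* reconstructed, per note above */
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	put_cpu();	/* re-enable preemption */
}

The net effect of the patch is that bottom halves are blocked only while snapshotting the local cpu's counters, rather than for the whole function; the potentially long walk over all other cpus runs with softirqs enabled, relying on get_cpu() having disabled preemption (so curcpu stays the executing cpu) and on the per-cpu write locks to serialize against concurrent counter updates. The arp_tables.c hunks apply the identical change with struct arpt_entry in place of struct ipt_entry.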