-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 10 ++++++----
-rw-r--r--  net/ipv4/netfilter/ip_tables.c  | 10 ++++++----
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 10 ++++++----
3 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c868dd53e432..6bccba31d132 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -720,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -741,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 3c584a6765b0..c439721b165a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 33113c1ea02f..5359ef4daac5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,7 +897,7 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -907,14 +907,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -928,7 +930,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
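All three files receive the same transformation: curcpu is now taken from get_cpu(), which disables preemption for the whole function; local_bh_disable()/local_bh_enable() shrink to bracket only the snapshot of the local CPU's entries; and the final local_bh_enable() becomes put_cpu(). Below is a minimal sketch of that pattern, assuming the kernel context of the patch; the function name and the commented-out loop bodies are placeholders, and only the get_cpu()/put_cpu(), local_bh_*() and xt_info_wr*lock() calls are taken from the diff. It compiles only in-tree, not as a user-space program.

#include <linux/smp.h>		/* get_cpu(), put_cpu() */
#include <linux/bottom_half.h>	/* local_bh_disable(), local_bh_enable() */
#include <linux/cpumask.h>	/* for_each_possible_cpu() */

/* Illustrative skeleton, not the patched function itself: block bottom
 * halves only while reading this CPU's data, then walk the other CPUs
 * with just preemption disabled.
 */
static void counters_snapshot_pattern(void)
{
	unsigned int cpu;
	unsigned int curcpu = get_cpu();	/* disables preemption */

	/* Softirqs update the local CPU's entries, so they must be held
	 * off while we snapshot our own per-cpu data.
	 */
	local_bh_disable();
	/* ... SET_COUNTER() loop over the local CPU's entries ... */
	local_bh_enable();

	/* Remote CPUs' entries are serialized by xt_info_wrlock(cpu),
	 * so bottom halves may stay enabled here; preemption is still
	 * off, which keeps curcpu valid for the comparison below.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		/* xt_info_wrlock(cpu);
		 * ... accumulate that CPU's counters ...
		 * xt_info_wrunlock(cpu);
		 */
	}
	put_cpu();	/* re-enables preemption */
}

The design point the in-diff comment makes is that the cross-CPU walk needs only a stable curcpu, not softirq exclusion: each remote CPU's entries are written under xt_info_wrlock(cpu), so bottom halves can remain enabled for the bulk of the work.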