author     Patrick McHardy <kaber@trash.net>        2007-12-18 00:56:48 -0500
committer  David S. Miller <davem@davemloft.net>    2008-01-28 17:58:47 -0500
commit     27e2c26b85b6b234411d94127201436c1ec9c002
tree       61daa442ef241b7b0e21c4b26efad2efaf5fed2f /net
parent     fb5b6095f320bd5a615049aa5fe8827ae9d1bf80
[NETFILTER]: arp_tables: move counter allocation to separate function
More resyncing with ip_tables.c as preparation for compat support.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/netfilter/arp_tables.c  29
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b0f43315842d..eaca06eace03 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -706,16 +706,11 @@ static void get_counters(const struct xt_table_info *t,
 	}
 }
 
-static int copy_entries_to_user(unsigned int total_size,
-				struct arpt_table *table,
-				void __user *userptr)
+static inline struct xt_counters *alloc_counters(struct arpt_table *table)
 {
-	unsigned int off, num, countersize;
-	struct arpt_entry *e;
+	unsigned int countersize;
 	struct xt_counters *counters;
 	struct xt_table_info *private = table->private;
-	int ret = 0;
-	void *loc_cpu_entry;
 
 	/* We need atomic snapshot of counters: rest doesn't change
 	 * (other than comefrom, which userspace doesn't care
@@ -725,13 +720,31 @@ static int copy_entries_to_user(unsigned int total_size,
 	counters = vmalloc_node(countersize, numa_node_id());
 
 	if (counters == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* First, sum counters... */
 	write_lock_bh(&table->lock);
 	get_counters(private, counters);
 	write_unlock_bh(&table->lock);
 
+	return counters;
+}
+
+static int copy_entries_to_user(unsigned int total_size,
+				struct arpt_table *table,
+				void __user *userptr)
+{
+	unsigned int off, num;
+	struct arpt_entry *e;
+	struct xt_counters *counters;
+	struct xt_table_info *private = table->private;
+	int ret = 0;
+	void *loc_cpu_entry;
+
+	counters = alloc_counters(table);
+	if (IS_ERR(counters))
+		return PTR_ERR(counters);
+
 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	/* ... then copy entire thing ... */
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
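Note on the new return convention: alloc_counters() now reports allocation failure through an error pointer, so its caller uses IS_ERR()/PTR_ERR() from <linux/err.h> instead of checking for NULL or a negative int. Below is a minimal standalone sketch of that pattern; the demo_counters type and demo_* functions are illustrative stand-ins, not code from this patch (which allocates with vmalloc_node() and uses struct xt_counters).

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical counter type, for illustration only. */
struct demo_counters {
	u64 pcnt;
	u64 bcnt;
};

/* Allocate like the new alloc_counters(): on failure, encode the errno
 * in the returned pointer with ERR_PTR() rather than returning NULL. */
static struct demo_counters *demo_alloc(unsigned int n)
{
	struct demo_counters *c = kcalloc(n, sizeof(*c), GFP_KERNEL);

	if (c == NULL)
		return ERR_PTR(-ENOMEM);
	return c;
}

/* The call site mirrors the updated copy_entries_to_user(): IS_ERR()
 * detects the encoded error, PTR_ERR() recovers it as a plain int. */
static int demo_use(unsigned int n)
{
	struct demo_counters *c = demo_alloc(n);

	if (IS_ERR(c))
		return PTR_ERR(c);

	/* ... fill in and hand the counters to userspace here ... */
	kfree(c);
	return 0;
}

This is why copy_entries_to_user() in the patch can simply forward the failure with return PTR_ERR(counters) instead of threading a separate integer error code out of the helper.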