author    Jan Engelhardt <jengelh@medozas.de>    2010-04-19 10:05:10 -0400
committer Patrick McHardy <kaber@trash.net>     2010-04-19 10:05:10 -0400
commit    f3c5c1bfd430858d3a05436f82c51e53104feb6b (patch)
tree      ada5b570b66e141e79fdb256f69e2541a3d30c04 /net/netfilter
parent    e281b19897dc21c1071802808d461627d747a877 (diff)
netfilter: xtables: make ip_tables reentrant
Currently, the table traverser stores return addresses in the ruleset itself (struct ip6t_entry->comefrom). This has a well-known drawback: the jumpstack is overwritten on reentry, making it necessary for targets to return absolute verdicts. Also, the ruleset (which might be heavy memory-wise) needs to be replicated for each CPU that can possibly invoke ip6t_do_table.

This patch decouples the jumpstack from struct ip6t_entry and instead puts it into xt_table_info. Not being restricted by 'comefrom' anymore, we can set up a stack as needed. By default, there is room allocated for two entries into the traverser.

arp_tables is not touched though, because there is just one/two modules and further patches seek to collapse the table traverser anyhow.

Signed-off-by: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
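The ip6t_do_table changes that actually consume this stack live under net/ipv4 and net/ipv6 and fall outside this diffstat. Purely as a rough standalone illustration of the idea (invented rule encoding; names only loosely mirror the patch, nothing below is kernel code), a reentrant traverser keeps its return addresses in a bounded side stack instead of inside the rules:

#include <stdio.h>

struct rule {
	int jump_to;   /* >= 0: enter the chain starting at that index */
	int verdict;   /* consulted when jump_to < 0; -1 means RETURN */
};

enum { STACKSIZE = 4 };  /* stands in for chains * xt_jumpstack_multiplier */

static int traverse(const struct rule *rules, int start)
{
	int jumpstack[STACKSIZE];   /* per-CPU in the kernel patch */
	int stackptr = 0;
	int i = start;

	for (;;) {
		const struct rule *r = &rules[i];

		if (r->jump_to >= 0) {
			if (stackptr >= STACKSIZE)
				return 0;  /* stack exhausted: fail closed */
			jumpstack[stackptr++] = i + 1;  /* return address */
			i = r->jump_to;
		} else if (r->verdict == -1) {
			if (stackptr == 0)
				return 0;  /* RETURN from the base chain */
			i = jumpstack[--stackptr];  /* pop and resume */
		} else {
			return r->verdict;  /* terminal verdict */
		}
	}
}

int main(void)
{
	/* rule 0 jumps to the "chain" at rule 2, which RETURNs to rule 1 */
	const struct rule rules[] = {
		{ .jump_to =  2, .verdict =  0 },
		{ .jump_to = -1, .verdict =  1 },  /* e.g. ACCEPT */
		{ .jump_to = -1, .verdict = -1 },  /* RETURN */
	};

	printf("verdict=%d\n", traverse(rules, 0));  /* prints verdict=1 */
	return 0;
}

Because the return addresses live outside the ruleset, a nested invocation simply continues from a deeper stackptr instead of clobbering per-rule state, which is what previously forced targets to return absolute verdicts.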
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/x_tables.c | 77
1 file changed, 77 insertions(+), 0 deletions(-)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8e23d8f68459..edde5c602890 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -62,6 +62,9 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
 	[NFPROTO_IPV6]   = "ip6",
 };
 
+/* Allow this many total (re)entries. */
+static const unsigned int xt_jumpstack_multiplier = 2;
+
 /* Registration hooks for targets. */
 int
 xt_register_target(struct xt_target *target)
@@ -680,6 +683,26 @@ void xt_free_table_info(struct xt_table_info *info)
 		else
 			vfree(info->entries[cpu]);
 	}
+
+	if (info->jumpstack != NULL) {
+		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
+			for_each_possible_cpu(cpu)
+				vfree(info->jumpstack[cpu]);
+		} else {
+			for_each_possible_cpu(cpu)
+				kfree(info->jumpstack[cpu]);
+		}
+	}
+
+	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
+		vfree(info->jumpstack);
+	else
+		kfree(info->jumpstack);
+	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
+		vfree(info->stackptr);
+	else
+		kfree(info->stackptr);
+
 	kfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
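The free path above has to mirror the allocation strategy: vmalloc()ed memory must be released with vfree() and kmalloc()ed memory with kfree(), so the same size > PAGE_SIZE test that chose the allocator is repeated at teardown. A minimal sketch of the pairing (hypothetical helper names, not part of the patch, which open-codes this per field):

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical helpers illustrating the pairing rule only. */
static void *xt_alloc_sized(size_t size)
{
	if (size > PAGE_SIZE)
		return vmalloc(size);
	return kmalloc(size, GFP_KERNEL);
}

static void xt_free_sized(void *p, size_t size)
{
	if (size > PAGE_SIZE)
		vfree(p);    /* must match vmalloc() */
	else
		kfree(p);    /* must match kmalloc() */
}

Later kernels provide kvmalloc()/kvfree(), which encapsulate exactly this choice.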
@@ -724,6 +747,49 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
 DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
 EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
 
+static int xt_jumpstack_alloc(struct xt_table_info *i)
+{
+	unsigned int size;
+	int cpu;
+
+	size = sizeof(unsigned int) * nr_cpu_ids;
+	if (size > PAGE_SIZE)
+		i->stackptr = vmalloc(size);
+	else
+		i->stackptr = kmalloc(size, GFP_KERNEL);
+	if (i->stackptr == NULL)
+		return -ENOMEM;
+	memset(i->stackptr, 0, size);
+
+	size = sizeof(void **) * nr_cpu_ids;
+	if (size > PAGE_SIZE)
+		i->jumpstack = vmalloc(size);
+	else
+		i->jumpstack = kmalloc(size, GFP_KERNEL);
+	if (i->jumpstack == NULL)
+		return -ENOMEM;
+	memset(i->jumpstack, 0, size);
+
+	i->stacksize *= xt_jumpstack_multiplier;
+	size = sizeof(void *) * i->stacksize;
+	for_each_possible_cpu(cpu) {
+		if (size > PAGE_SIZE)
+			i->jumpstack[cpu] = vmalloc_node(size,
+				cpu_to_node(cpu));
+		else
+			i->jumpstack[cpu] = kmalloc_node(size,
+				GFP_KERNEL, cpu_to_node(cpu));
+		if (i->jumpstack[cpu] == NULL)
+			/*
+			 * Freeing will be done later on by the callers. The
+			 * chain is: xt_replace_table -> __do_replace ->
+			 * do_replace -> xt_free_table_info.
+			 */
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
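xt_jumpstack_alloc() builds a two-level structure: one pointer per possible CPU, each pointing at a NUMA-local array of stacksize return slots, plus a per-CPU depth counter in stackptr. A userspace sketch of that layout (assumptions: plain calloc() stands in for the kernel allocators, fixed CPU count, no NUMA placement, and freeing is elided just as the kernel defers it to xt_free_table_info):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS    4   /* stand-in for nr_cpu_ids */
#define MULTIPLIER 2   /* xt_jumpstack_multiplier */

struct table_info {
	unsigned int stacksize;
	unsigned int *stackptr;   /* one stack depth per CPU */
	void ***jumpstack;        /* one array of return slots per CPU */
};

static int jumpstack_alloc(struct table_info *i, unsigned int nchains)
{
	int cpu;

	i->stacksize = nchains * MULTIPLIER;   /* room for one reentry */
	i->stackptr  = calloc(NR_CPUS, sizeof(*i->stackptr));
	i->jumpstack = calloc(NR_CPUS, sizeof(*i->jumpstack));
	if (i->stackptr == NULL || i->jumpstack == NULL)
		return -1;

	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		i->jumpstack[cpu] = calloc(i->stacksize, sizeof(void *));
		if (i->jumpstack[cpu] == NULL)
			return -1;   /* caller frees, as in the patch */
	}
	return 0;
}

int main(void)
{
	struct table_info info = { 0 };

	if (jumpstack_alloc(&info, 8) == 0)
		printf("slots per cpu: %u\n", info.stacksize);
	return 0;
}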
@@ -732,6 +798,7 @@ xt_replace_table(struct xt_table *table,
 			  int *error)
 {
 	struct xt_table_info *private;
+	int ret;
 
 	/* Do the substitution. */
 	local_bh_disable();
@@ -746,6 +813,12 @@ xt_replace_table(struct xt_table *table,
 		return NULL;
 	}
 
+	ret = xt_jumpstack_alloc(newinfo);
+	if (ret < 0) {
+		*error = ret;
+		return NULL;
+	}
+
 	table->private = newinfo;
 	newinfo->initial_entries = private->initial_entries;
 
@@ -770,6 +843,10 @@ struct xt_table *xt_register_table(struct net *net,
 	struct xt_table_info *private;
 	struct xt_table *t, *table;
 
+	ret = xt_jumpstack_alloc(newinfo);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
 	/* Don't add one object to multiple lists. */
 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 	if (!table) {