commit     f3c5c1bfd430858d3a05436f82c51e53104feb6b
author     Jan Engelhardt <jengelh@medozas.de>     2010-04-19 10:05:10 -0400
committer  Patrick McHardy <kaber@trash.net>       2010-04-19 10:05:10 -0400
tree       ada5b570b66e141e79fdb256f69e2541a3d30c04 /net/ipv6
parent     e281b19897dc21c1071802808d461627d747a877
netfilter: xtables: make ip_tables reentrant
Currently, the table traverser stores return addresses in the ruleset
itself (struct ip6t_entry->comefrom). This has a well-known drawback:
the jumpstack is overwritten on reentry, making it necessary for
targets to return absolute verdicts. Also, the ruleset (which might
be heavy memory-wise) needs to be replicated for each CPU that can
possibly invoke ip6t_do_table.
This patch decouples the jumpstack from struct ip6t_entry and instead
puts it into xt_table_info. Not being restricted by 'comefrom'
anymore, we can set up a stack as needed. By default, there is room
allocated for two entries into the traverser.
arp_tables is not touched, though, because there are only one or two
modules and further patches seek to collapse the table traverser
anyhow.
Signed-off-by: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
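
For illustration, a minimal user-space sketch of the new traversal scheme
follows. The rule layout, names and verdict encoding are invented for the
example; only the bounded push/pop logic mirrors the patched ip6t_do_table():

    #include <stdio.h>

    enum action { JUMP, RETURN, ACCEPT };

    struct rule {
            enum action act;
            unsigned int jump_to;           /* valid when act == JUMP */
    };

    static const struct rule ruleset[] = {
            /* base chain */
            { JUMP,   3 },  /* 0: call user-defined chain at rule 3 */
            { ACCEPT, 0 },  /* 1: resume point after the chain returns */
            { ACCEPT, 0 },  /* 2: underflow entry: the chain policy */
            /* user-defined chain A */
            { JUMP,   5 },  /* 3: nested call into chain B */
            { RETURN, 0 },  /* 4: pop back to rule 1 */
            /* user-defined chain B */
            { RETURN, 0 },  /* 5: pop back to rule 4 */
    };

    #define STACKSIZE 2                     /* default: two nested jumps */

    int main(void)
    {
            unsigned int jumpstack[STACKSIZE]; /* per CPU in the kernel */
            unsigned int stackptr = 0;         /* private->stackptr[cpu] */
            unsigned int origptr  = stackptr;  /* restored on exit */
            unsigned int i = 0, underflow = 2;

            for (;;) {
                    const struct rule *e = &ruleset[i];

                    if (e->act == ACCEPT) {
                            printf("verdict: ACCEPT at rule %u\n", i);
                            break;
                    }
                    if (e->act == RETURN) {
                            if (stackptr == 0)      /* base chain ends */
                                    i = underflow;
                            else                    /* resume saved spot */
                                    i = jumpstack[--stackptr];
                            continue;
                    }
                    /* JUMP: refuse to overrun the stack, as the patch does */
                    if (stackptr >= STACKSIZE) {
                            printf("verdict: DROP (jump stack exhausted)\n");
                            break;
                    }
                    jumpstack[stackptr++] = i + 1;  /* save resume position */
                    i = e->jump_to;
            }
            stackptr = origptr;     /* like "*stackptr = origptr" below */
            return 0;
    }

Because the return addresses now live outside the rules, a nested invocation
no longer clobbers the state of an outer one, and the traverser no longer
forces the ruleset itself to be writable.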
Diffstat (limited to 'net/ipv6')
 net/ipv6/netfilter/ip6_tables.c | 56
 1 file changed, 22 insertions(+), 34 deletions(-)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index f2b815e72329..2a2770bcd640 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -351,15 +351,14 @@ ip6t_do_table(struct sk_buff *skb,
 	      const struct net_device *out,
 	      struct xt_table *table)
 {
-#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
-
 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
 	bool hotdrop = false;
 	/* Initializing verdict to NF_DROP keeps gcc happy. */
 	unsigned int verdict = NF_DROP;
 	const char *indev, *outdev;
 	const void *table_base;
-	struct ip6t_entry *e, *back;
+	struct ip6t_entry *e, **jumpstack;
+	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_match_param mtpar;
 	struct xt_target_param tgpar;
@@ -383,19 +382,19 @@ ip6t_do_table(struct sk_buff *skb,
 
 	xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[smp_processor_id()];
+	cpu        = smp_processor_id();
+	table_base = private->entries[cpu];
+	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
+	stackptr   = &private->stackptr[cpu];
+	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
-	/* For return from builtin chain */
-	back = get_entry(table_base, private->underflow[hook]);
-
 	do {
 		const struct ip6t_entry_target *t;
 		const struct xt_entry_match *ematch;
 
 		IP_NF_ASSERT(e);
-		IP_NF_ASSERT(back);
 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
 		    &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
 no_match:
@@ -432,17 +431,20 @@ ip6t_do_table(struct sk_buff *skb,
 					verdict = (unsigned)(-v) - 1;
 					break;
 				}
-				e = back;
-				back = get_entry(table_base, back->comefrom);
+				if (*stackptr == 0)
+					e = get_entry(table_base,
+					    private->underflow[hook]);
+				else
+					e = ip6t_next_entry(jumpstack[--*stackptr]);
 				continue;
 			}
 			if (table_base + v != ip6t_next_entry(e) &&
 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
-				/* Save old back ptr in next entry */
-				struct ip6t_entry *next = ip6t_next_entry(e);
-				next->comefrom = (void *)back - table_base;
-				/* set back pointer to next entry */
-				back = next;
+				if (*stackptr >= private->stacksize) {
+					verdict = NF_DROP;
+					break;
+				}
+				jumpstack[(*stackptr)++] = e;
 			}
 
 			e = get_entry(table_base, v);
@@ -454,19 +456,7 @@ ip6t_do_table(struct sk_buff *skb,
 		tgpar.target = t->u.kernel.target;
 		tgpar.targinfo = t->data;
 
-#ifdef CONFIG_NETFILTER_DEBUG
-		tb_comefrom = 0xeeeeeeec;
-#endif
 		verdict = t->u.kernel.target->target(skb, &tgpar);
-
-#ifdef CONFIG_NETFILTER_DEBUG
-		if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
-			printk("Target %s reentered!\n",
-			       t->u.kernel.target->name);
-			verdict = NF_DROP;
-		}
-		tb_comefrom = 0x57acc001;
-#endif
 		if (verdict == IP6T_CONTINUE)
 			e = ip6t_next_entry(e);
 		else
@@ -474,10 +464,8 @@ ip6t_do_table(struct sk_buff *skb,
 			break;
 	} while (!hotdrop);
 
-#ifdef CONFIG_NETFILTER_DEBUG
-	tb_comefrom = NETFILTER_LINK_POISON;
-#endif
 	xt_info_rdunlock_bh();
+	*stackptr = origptr;
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
@@ -486,8 +474,6 @@ ip6t_do_table(struct sk_buff *skb,
 		return NF_DROP;
 	else return verdict;
 #endif
-
-#undef tb_comefrom
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -869,6 +855,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
 		if (ret != 0)
 			return ret;
 		++i;
+		if (strcmp(ip6t_get_target(iter)->u.user.name,
+		    XT_ERROR_TARGET) == 0)
+			++newinfo->stacksize;
 	}
 
 	if (i != repl->num_entries) {
@@ -2120,8 +2109,7 @@ struct xt_table *ip6t_register_table(struct net *net,
 {
 	int ret;
 	struct xt_table_info *newinfo;
-	struct xt_table_info bootstrap
-		= { 0, 0, 0, { 0 }, { 0 }, { } };
+	struct xt_table_info bootstrap = {0};
 	void *loc_cpu_entry;
 	struct xt_table *new_table;
 
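
The hunks above only consume private->jumpstack, private->stackptr and
private->stacksize; the matching per-CPU allocation lives in the x_tables
core, which is outside this diffstat. A rough, hypothetical sketch of what
that setup has to do (names, sizing and error handling simplified; not the
actual x_tables code):

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/topology.h>
    #include <linux/netfilter/x_tables.h>

    /* Hypothetical sketch only: the real allocation sits in
     * net/netfilter/x_tables.c, not in this diff.  Error paths assume
     * the caller frees whatever was already allocated. */
    static int jumpstack_alloc_sketch(struct xt_table_info *info)
    {
            int cpu;

            /* one saved stack position per possible CPU */
            info->stackptr = kcalloc(nr_cpu_ids, sizeof(unsigned int),
                                     GFP_KERNEL);
            if (info->stackptr == NULL)
                    return -ENOMEM;

            /* one stack base pointer per possible CPU */
            info->jumpstack = kcalloc(nr_cpu_ids, sizeof(void **),
                                      GFP_KERNEL);
            if (info->jumpstack == NULL)
                    return -ENOMEM;

            /* ->stacksize slots per CPU; translate_table() sized it by
             * counting XT_ERROR_TARGET chain heads (see the hunk above).
             * The "room for two entries" default is omitted here. */
            for_each_possible_cpu(cpu) {
                    info->jumpstack[cpu] = kmalloc_node(info->stacksize *
                                                        sizeof(void *),
                                                        GFP_KERNEL,
                                                        cpu_to_node(cpu));
                    if (info->jumpstack[cpu] == NULL)
                            return -ENOMEM;
            }
            return 0;
    }

Allocating per possible CPU (rather than per online CPU) matches how the
ruleset copies in private->entries[] are handled, so a traversal on a newly
onlined CPU always finds its stack in place.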