aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/netfilter/ip_tables.c
diff options
context:
space:
mode:
authorHarald Welte <laforge@netfilter.org>2006-01-12 16:30:04 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-01-12 17:06:43 -0500
commit2e4e6a17af35be359cc8f1c924f8f198fbd478cc (patch)
treecb4b5438dcf9ff9d57518a26124308bcbfffd214 /net/ipv4/netfilter/ip_tables.c
parent880b005f294454d989783d0984dc554dfe3c8214 (diff)
[NETFILTER] x_tables: Abstraction layer for {ip,ip6,arp}_tables
This monster-patch tries to do the best job for unifying the data structures and backend interfaces for the three evil clones ip_tables, ip6_tables and arp_tables. In an ideal world we would never have allowed this kind of copy+paste programming... but well, our world isn't (yet?) ideal. o introduce a new x_tables module o {ip,arp,ip6}_tables depend on this x_tables module o registration functions for tables, matches and targets are only wrappers around x_tables provided functions o all matches/targets that are used from ip_tables and ip6_tables are now implemented as xt_FOOBAR.c files and provide module aliases to ipt_FOOBAR and ip6t_FOOBAR o header files for xt_matches are in include/linux/netfilter/, include/linux/netfilter_{ipv4,ipv6} contains compatibility wrappers around the xt_FOOBAR.h headers Based on this patchset we're going to further unify the code, gradually getting rid of all the layer 3 specific assumptions. Signed-off-by: Harald Welte <laforge@netfilter.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/netfilter/ip_tables.c')
-rw-r--r--net/ipv4/netfilter/ip_tables.c842
1 file changed, 95 insertions, 747 deletions
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 877bc96d333..2371b2062c2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2,7 +2,7 @@
2 * Packet matching code. 2 * Packet matching code.
3 * 3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -11,6 +11,8 @@
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org> 11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside 12 * - increase module usage count as soon as we have rules inside
13 * a table 13 * a table
14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
14 */ 16 */
15#include <linux/config.h> 17#include <linux/config.h>
16#include <linux/cache.h> 18#include <linux/cache.h>
@@ -20,8 +22,6 @@
20#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
21#include <linux/netdevice.h> 23#include <linux/netdevice.h>
22#include <linux/module.h> 24#include <linux/module.h>
23#include <linux/tcp.h>
24#include <linux/udp.h>
25#include <linux/icmp.h> 25#include <linux/icmp.h>
26#include <net/ip.h> 26#include <net/ip.h>
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
@@ -30,6 +30,7 @@
30#include <linux/err.h> 30#include <linux/err.h>
31#include <linux/cpumask.h> 31#include <linux/cpumask.h>
32 32
33#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter_ipv4/ip_tables.h> 34#include <linux/netfilter_ipv4/ip_tables.h>
34 35
35MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
@@ -62,14 +63,6 @@ do { \
62#else 63#else
63#define IP_NF_ASSERT(x) 64#define IP_NF_ASSERT(x)
64#endif 65#endif
65#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
66
67static DECLARE_MUTEX(ipt_mutex);
68
69/* Must have mutex */
70#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
71#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
72#include <linux/netfilter_ipv4/listhelp.h>
73 66
74#if 0 67#if 0
75/* All the better to debug you with... */ 68/* All the better to debug you with... */
@@ -86,36 +79,6 @@ static DECLARE_MUTEX(ipt_mutex);
86 79
87 Hence the start of any table is given by get_table() below. */ 80 Hence the start of any table is given by get_table() below. */
88 81
89/* The table itself */
90struct ipt_table_info
91{
92 /* Size per table */
93 unsigned int size;
94 /* Number of entries: FIXME. --RR */
95 unsigned int number;
96 /* Initial number of entries. Needed for module usage count */
97 unsigned int initial_entries;
98
99 /* Entry points and underflows */
100 unsigned int hook_entry[NF_IP_NUMHOOKS];
101 unsigned int underflow[NF_IP_NUMHOOKS];
102
103 /* ipt_entry tables: one per CPU */
104 void *entries[NR_CPUS];
105};
106
107static LIST_HEAD(ipt_target);
108static LIST_HEAD(ipt_match);
109static LIST_HEAD(ipt_tables);
110#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
111#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
112
113#if 0
114#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
115#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
116#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
117#endif
118
119/* Returns whether matches rule or not. */ 82/* Returns whether matches rule or not. */
120static inline int 83static inline int
121ip_packet_match(const struct iphdr *ip, 84ip_packet_match(const struct iphdr *ip,
@@ -234,7 +197,8 @@ int do_match(struct ipt_entry_match *m,
234 int *hotdrop) 197 int *hotdrop)
235{ 198{
236 /* Stop iteration if it doesn't match */ 199 /* Stop iteration if it doesn't match */
237 if (!m->u.kernel.match->match(skb, in, out, m->data, offset, hotdrop)) 200 if (!m->u.kernel.match->match(skb, in, out, m->data, offset,
201 skb->nh.iph->ihl*4, hotdrop))
238 return 1; 202 return 1;
239 else 203 else
240 return 0; 204 return 0;
@@ -265,6 +229,7 @@ ipt_do_table(struct sk_buff **pskb,
265 const char *indev, *outdev; 229 const char *indev, *outdev;
266 void *table_base; 230 void *table_base;
267 struct ipt_entry *e, *back; 231 struct ipt_entry *e, *back;
232 struct xt_table_info *private = table->private;
268 233
269 /* Initialization */ 234 /* Initialization */
270 ip = (*pskb)->nh.iph; 235 ip = (*pskb)->nh.iph;
@@ -281,24 +246,11 @@ ipt_do_table(struct sk_buff **pskb,
281 246
282 read_lock_bh(&table->lock); 247 read_lock_bh(&table->lock);
283 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 248 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
284 table_base = (void *)table->private->entries[smp_processor_id()]; 249 table_base = (void *)private->entries[smp_processor_id()];
285 e = get_entry(table_base, table->private->hook_entry[hook]); 250 e = get_entry(table_base, private->hook_entry[hook]);
286
287#ifdef CONFIG_NETFILTER_DEBUG
288 /* Check noone else using our table */
289 if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
290 && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
291 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
292 smp_processor_id(),
293 table->name,
294 &((struct ipt_entry *)table_base)->comefrom,
295 ((struct ipt_entry *)table_base)->comefrom);
296 }
297 ((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
298#endif
299 251
300 /* For return from builtin chain */ 252 /* For return from builtin chain */
301 back = get_entry(table_base, table->private->underflow[hook]); 253 back = get_entry(table_base, private->underflow[hook]);
302 254
303 do { 255 do {
304 IP_NF_ASSERT(e); 256 IP_NF_ASSERT(e);
@@ -384,9 +336,6 @@ ipt_do_table(struct sk_buff **pskb,
384 } 336 }
385 } while (!hotdrop); 337 } while (!hotdrop);
386 338
387#ifdef CONFIG_NETFILTER_DEBUG
388 ((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
389#endif
390 read_unlock_bh(&table->lock); 339 read_unlock_bh(&table->lock);
391 340
392#ifdef DEBUG_ALLOW_ALL 341#ifdef DEBUG_ALLOW_ALL
@@ -398,145 +347,6 @@ ipt_do_table(struct sk_buff **pskb,
398#endif 347#endif
399} 348}
400 349
401/*
402 * These are weird, but module loading must not be done with mutex
403 * held (since they will register), and we have to have a single
404 * function to use try_then_request_module().
405 */
406
407/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
408static inline struct ipt_table *find_table_lock(const char *name)
409{
410 struct ipt_table *t;
411
412 if (down_interruptible(&ipt_mutex) != 0)
413 return ERR_PTR(-EINTR);
414
415 list_for_each_entry(t, &ipt_tables, list)
416 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
417 return t;
418 up(&ipt_mutex);
419 return NULL;
420}
421
422/* Find match, grabs ref. Returns ERR_PTR() on error. */
423static inline struct ipt_match *find_match(const char *name, u8 revision)
424{
425 struct ipt_match *m;
426 int err = 0;
427
428 if (down_interruptible(&ipt_mutex) != 0)
429 return ERR_PTR(-EINTR);
430
431 list_for_each_entry(m, &ipt_match, list) {
432 if (strcmp(m->name, name) == 0) {
433 if (m->revision == revision) {
434 if (try_module_get(m->me)) {
435 up(&ipt_mutex);
436 return m;
437 }
438 } else
439 err = -EPROTOTYPE; /* Found something. */
440 }
441 }
442 up(&ipt_mutex);
443 return ERR_PTR(err);
444}
445
446/* Find target, grabs ref. Returns ERR_PTR() on error. */
447static inline struct ipt_target *find_target(const char *name, u8 revision)
448{
449 struct ipt_target *t;
450 int err = 0;
451
452 if (down_interruptible(&ipt_mutex) != 0)
453 return ERR_PTR(-EINTR);
454
455 list_for_each_entry(t, &ipt_target, list) {
456 if (strcmp(t->name, name) == 0) {
457 if (t->revision == revision) {
458 if (try_module_get(t->me)) {
459 up(&ipt_mutex);
460 return t;
461 }
462 } else
463 err = -EPROTOTYPE; /* Found something. */
464 }
465 }
466 up(&ipt_mutex);
467 return ERR_PTR(err);
468}
469
470struct ipt_target *ipt_find_target(const char *name, u8 revision)
471{
472 struct ipt_target *target;
473
474 target = try_then_request_module(find_target(name, revision),
475 "ipt_%s", name);
476 if (IS_ERR(target) || !target)
477 return NULL;
478 return target;
479}
480
481static int match_revfn(const char *name, u8 revision, int *bestp)
482{
483 struct ipt_match *m;
484 int have_rev = 0;
485
486 list_for_each_entry(m, &ipt_match, list) {
487 if (strcmp(m->name, name) == 0) {
488 if (m->revision > *bestp)
489 *bestp = m->revision;
490 if (m->revision == revision)
491 have_rev = 1;
492 }
493 }
494 return have_rev;
495}
496
497static int target_revfn(const char *name, u8 revision, int *bestp)
498{
499 struct ipt_target *t;
500 int have_rev = 0;
501
502 list_for_each_entry(t, &ipt_target, list) {
503 if (strcmp(t->name, name) == 0) {
504 if (t->revision > *bestp)
505 *bestp = t->revision;
506 if (t->revision == revision)
507 have_rev = 1;
508 }
509 }
510 return have_rev;
511}
512
513/* Returns true or false (if no such extension at all) */
514static inline int find_revision(const char *name, u8 revision,
515 int (*revfn)(const char *, u8, int *),
516 int *err)
517{
518 int have_rev, best = -1;
519
520 if (down_interruptible(&ipt_mutex) != 0) {
521 *err = -EINTR;
522 return 1;
523 }
524 have_rev = revfn(name, revision, &best);
525 up(&ipt_mutex);
526
527 /* Nothing at all? Return 0 to try loading module. */
528 if (best == -1) {
529 *err = -ENOENT;
530 return 0;
531 }
532
533 *err = best;
534 if (!have_rev)
535 *err = -EPROTONOSUPPORT;
536 return 1;
537}
538
539
540/* All zeroes == unconditional rule. */ 350/* All zeroes == unconditional rule. */
541static inline int 351static inline int
542unconditional(const struct ipt_ip *ip) 352unconditional(const struct ipt_ip *ip)
@@ -553,7 +363,7 @@ unconditional(const struct ipt_ip *ip)
553/* Figures out from what hook each rule can be called: returns 0 if 363/* Figures out from what hook each rule can be called: returns 0 if
554 there are loops. Puts hook bitmask in comefrom. */ 364 there are loops. Puts hook bitmask in comefrom. */
555static int 365static int
556mark_source_chains(struct ipt_table_info *newinfo, 366mark_source_chains(struct xt_table_info *newinfo,
557 unsigned int valid_hooks, void *entry0) 367 unsigned int valid_hooks, void *entry0)
558{ 368{
559 unsigned int hook; 369 unsigned int hook;
@@ -699,7 +509,7 @@ check_match(struct ipt_entry_match *m,
699{ 509{
700 struct ipt_match *match; 510 struct ipt_match *match;
701 511
702 match = try_then_request_module(find_match(m->u.user.name, 512 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
703 m->u.user.revision), 513 m->u.user.revision),
704 "ipt_%s", m->u.user.name); 514 "ipt_%s", m->u.user.name);
705 if (IS_ERR(match) || !match) { 515 if (IS_ERR(match) || !match) {
@@ -744,7 +554,8 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
744 goto cleanup_matches; 554 goto cleanup_matches;
745 555
746 t = ipt_get_target(e); 556 t = ipt_get_target(e);
747 target = try_then_request_module(find_target(t->u.user.name, 557 target = try_then_request_module(xt_find_target(AF_INET,
558 t->u.user.name,
748 t->u.user.revision), 559 t->u.user.revision),
749 "ipt_%s", t->u.user.name); 560 "ipt_%s", t->u.user.name);
750 if (IS_ERR(target) || !target) { 561 if (IS_ERR(target) || !target) {
@@ -781,7 +592,7 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
781 592
782static inline int 593static inline int
783check_entry_size_and_hooks(struct ipt_entry *e, 594check_entry_size_and_hooks(struct ipt_entry *e,
784 struct ipt_table_info *newinfo, 595 struct xt_table_info *newinfo,
785 unsigned char *base, 596 unsigned char *base,
786 unsigned char *limit, 597 unsigned char *limit,
787 const unsigned int *hook_entries, 598 const unsigned int *hook_entries,
@@ -815,7 +626,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
815 < 0 (not IPT_RETURN). --RR */ 626 < 0 (not IPT_RETURN). --RR */
816 627
817 /* Clear counters and comefrom */ 628 /* Clear counters and comefrom */
818 e->counters = ((struct ipt_counters) { 0, 0 }); 629 e->counters = ((struct xt_counters) { 0, 0 });
819 e->comefrom = 0; 630 e->comefrom = 0;
820 631
821 (*i)++; 632 (*i)++;
@@ -845,7 +656,7 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i)
845static int 656static int
846translate_table(const char *name, 657translate_table(const char *name,
847 unsigned int valid_hooks, 658 unsigned int valid_hooks,
848 struct ipt_table_info *newinfo, 659 struct xt_table_info *newinfo,
849 void *entry0, 660 void *entry0,
850 unsigned int size, 661 unsigned int size,
851 unsigned int number, 662 unsigned int number,
@@ -922,48 +733,10 @@ translate_table(const char *name,
922 return ret; 733 return ret;
923} 734}
924 735
925static struct ipt_table_info *
926replace_table(struct ipt_table *table,
927 unsigned int num_counters,
928 struct ipt_table_info *newinfo,
929 int *error)
930{
931 struct ipt_table_info *oldinfo;
932
933#ifdef CONFIG_NETFILTER_DEBUG
934 {
935 int cpu;
936
937 for_each_cpu(cpu) {
938 struct ipt_entry *table_base = newinfo->entries[cpu];
939 if (table_base)
940 table_base->comefrom = 0xdead57ac;
941 }
942 }
943#endif
944
945 /* Do the substitution. */
946 write_lock_bh(&table->lock);
947 /* Check inside lock: is the old number correct? */
948 if (num_counters != table->private->number) {
949 duprintf("num_counters != table->private->number (%u/%u)\n",
950 num_counters, table->private->number);
951 write_unlock_bh(&table->lock);
952 *error = -EAGAIN;
953 return NULL;
954 }
955 oldinfo = table->private;
956 table->private = newinfo;
957 newinfo->initial_entries = oldinfo->initial_entries;
958 write_unlock_bh(&table->lock);
959
960 return oldinfo;
961}
962
963/* Gets counters. */ 736/* Gets counters. */
964static inline int 737static inline int
965add_entry_to_counter(const struct ipt_entry *e, 738add_entry_to_counter(const struct ipt_entry *e,
966 struct ipt_counters total[], 739 struct xt_counters total[],
967 unsigned int *i) 740 unsigned int *i)
968{ 741{
969 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt); 742 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -984,8 +757,8 @@ set_entry_to_counter(const struct ipt_entry *e,
984} 757}
985 758
986static void 759static void
987get_counters(const struct ipt_table_info *t, 760get_counters(const struct xt_table_info *t,
988 struct ipt_counters counters[]) 761 struct xt_counters counters[])
989{ 762{
990 unsigned int cpu; 763 unsigned int cpu;
991 unsigned int i; 764 unsigned int i;
@@ -1024,14 +797,15 @@ copy_entries_to_user(unsigned int total_size,
1024{ 797{
1025 unsigned int off, num, countersize; 798 unsigned int off, num, countersize;
1026 struct ipt_entry *e; 799 struct ipt_entry *e;
1027 struct ipt_counters *counters; 800 struct xt_counters *counters;
801 struct xt_table_info *private = table->private;
1028 int ret = 0; 802 int ret = 0;
1029 void *loc_cpu_entry; 803 void *loc_cpu_entry;
1030 804
1031 /* We need atomic snapshot of counters: rest doesn't change 805 /* We need atomic snapshot of counters: rest doesn't change
1032 (other than comefrom, which userspace doesn't care 806 (other than comefrom, which userspace doesn't care
1033 about). */ 807 about). */
1034 countersize = sizeof(struct ipt_counters) * table->private->number; 808 countersize = sizeof(struct xt_counters) * private->number;
1035 counters = vmalloc_node(countersize, numa_node_id()); 809 counters = vmalloc_node(countersize, numa_node_id());
1036 810
1037 if (counters == NULL) 811 if (counters == NULL)
@@ -1039,14 +813,14 @@ copy_entries_to_user(unsigned int total_size,
1039 813
1040 /* First, sum counters... */ 814 /* First, sum counters... */
1041 write_lock_bh(&table->lock); 815 write_lock_bh(&table->lock);
1042 get_counters(table->private, counters); 816 get_counters(private, counters);
1043 write_unlock_bh(&table->lock); 817 write_unlock_bh(&table->lock);
1044 818
1045 /* choose the copy that is on our node/cpu, ... 819 /* choose the copy that is on our node/cpu, ...
1046 * This choice is lazy (because current thread is 820 * This choice is lazy (because current thread is
1047 * allowed to migrate to another cpu) 821 * allowed to migrate to another cpu)
1048 */ 822 */
1049 loc_cpu_entry = table->private->entries[raw_smp_processor_id()]; 823 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1050 /* ... then copy entire thing ... */ 824 /* ... then copy entire thing ... */
1051 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 825 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1052 ret = -EFAULT; 826 ret = -EFAULT;
@@ -1108,74 +882,36 @@ get_entries(const struct ipt_get_entries *entries,
1108 int ret; 882 int ret;
1109 struct ipt_table *t; 883 struct ipt_table *t;
1110 884
1111 t = find_table_lock(entries->name); 885 t = xt_find_table_lock(AF_INET, entries->name);
1112 if (t && !IS_ERR(t)) { 886 if (t && !IS_ERR(t)) {
887 struct xt_table_info *private = t->private;
1113 duprintf("t->private->number = %u\n", 888 duprintf("t->private->number = %u\n",
1114 t->private->number); 889 private->number);
1115 if (entries->size == t->private->size) 890 if (entries->size == private->size)
1116 ret = copy_entries_to_user(t->private->size, 891 ret = copy_entries_to_user(private->size,
1117 t, uptr->entrytable); 892 t, uptr->entrytable);
1118 else { 893 else {
1119 duprintf("get_entries: I've got %u not %u!\n", 894 duprintf("get_entries: I've got %u not %u!\n",
1120 t->private->size, 895 private->size,
1121 entries->size); 896 entries->size);
1122 ret = -EINVAL; 897 ret = -EINVAL;
1123 } 898 }
1124 module_put(t->me); 899 module_put(t->me);
1125 up(&ipt_mutex); 900 xt_table_unlock(t);
1126 } else 901 } else
1127 ret = t ? PTR_ERR(t) : -ENOENT; 902 ret = t ? PTR_ERR(t) : -ENOENT;
1128 903
1129 return ret; 904 return ret;
1130} 905}
1131 906
1132static void free_table_info(struct ipt_table_info *info)
1133{
1134 int cpu;
1135 for_each_cpu(cpu) {
1136 if (info->size <= PAGE_SIZE)
1137 kfree(info->entries[cpu]);
1138 else
1139 vfree(info->entries[cpu]);
1140 }
1141 kfree(info);
1142}
1143
1144static struct ipt_table_info *alloc_table_info(unsigned int size)
1145{
1146 struct ipt_table_info *newinfo;
1147 int cpu;
1148
1149 newinfo = kzalloc(sizeof(struct ipt_table_info), GFP_KERNEL);
1150 if (!newinfo)
1151 return NULL;
1152
1153 newinfo->size = size;
1154
1155 for_each_cpu(cpu) {
1156 if (size <= PAGE_SIZE)
1157 newinfo->entries[cpu] = kmalloc_node(size,
1158 GFP_KERNEL,
1159 cpu_to_node(cpu));
1160 else
1161 newinfo->entries[cpu] = vmalloc_node(size, cpu_to_node(cpu));
1162 if (newinfo->entries[cpu] == 0) {
1163 free_table_info(newinfo);
1164 return NULL;
1165 }
1166 }
1167
1168 return newinfo;
1169}
1170
1171static int 907static int
1172do_replace(void __user *user, unsigned int len) 908do_replace(void __user *user, unsigned int len)
1173{ 909{
1174 int ret; 910 int ret;
1175 struct ipt_replace tmp; 911 struct ipt_replace tmp;
1176 struct ipt_table *t; 912 struct ipt_table *t;
1177 struct ipt_table_info *newinfo, *oldinfo; 913 struct xt_table_info *newinfo, *oldinfo;
1178 struct ipt_counters *counters; 914 struct xt_counters *counters;
1179 void *loc_cpu_entry, *loc_cpu_old_entry; 915 void *loc_cpu_entry, *loc_cpu_old_entry;
1180 916
1181 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 917 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
@@ -1185,11 +921,7 @@ do_replace(void __user *user, unsigned int len)
1185 if (len != sizeof(tmp) + tmp.size) 921 if (len != sizeof(tmp) + tmp.size)
1186 return -ENOPROTOOPT; 922 return -ENOPROTOOPT;
1187 923
1188 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ 924 newinfo = xt_alloc_table_info(tmp.size);
1189 if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
1190 return -ENOMEM;
1191
1192 newinfo = alloc_table_info(tmp.size);
1193 if (!newinfo) 925 if (!newinfo)
1194 return -ENOMEM; 926 return -ENOMEM;
1195 927
@@ -1201,7 +933,7 @@ do_replace(void __user *user, unsigned int len)
1201 goto free_newinfo; 933 goto free_newinfo;
1202 } 934 }
1203 935
1204 counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters)); 936 counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
1205 if (!counters) { 937 if (!counters) {
1206 ret = -ENOMEM; 938 ret = -ENOMEM;
1207 goto free_newinfo; 939 goto free_newinfo;
@@ -1215,7 +947,7 @@ do_replace(void __user *user, unsigned int len)
1215 947
1216 duprintf("ip_tables: Translated table\n"); 948 duprintf("ip_tables: Translated table\n");
1217 949
1218 t = try_then_request_module(find_table_lock(tmp.name), 950 t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
1219 "iptable_%s", tmp.name); 951 "iptable_%s", tmp.name);
1220 if (!t || IS_ERR(t)) { 952 if (!t || IS_ERR(t)) {
1221 ret = t ? PTR_ERR(t) : -ENOENT; 953 ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1230,7 +962,7 @@ do_replace(void __user *user, unsigned int len)
1230 goto put_module; 962 goto put_module;
1231 } 963 }
1232 964
1233 oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret); 965 oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
1234 if (!oldinfo) 966 if (!oldinfo)
1235 goto put_module; 967 goto put_module;
1236 968
@@ -1249,23 +981,23 @@ do_replace(void __user *user, unsigned int len)
1249 /* Decrease module usage counts and free resource */ 981 /* Decrease module usage counts and free resource */
1250 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 982 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1251 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL); 983 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1252 free_table_info(oldinfo); 984 xt_free_table_info(oldinfo);
1253 if (copy_to_user(tmp.counters, counters, 985 if (copy_to_user(tmp.counters, counters,
1254 sizeof(struct ipt_counters) * tmp.num_counters) != 0) 986 sizeof(struct xt_counters) * tmp.num_counters) != 0)
1255 ret = -EFAULT; 987 ret = -EFAULT;
1256 vfree(counters); 988 vfree(counters);
1257 up(&ipt_mutex); 989 xt_table_unlock(t);
1258 return ret; 990 return ret;
1259 991
1260 put_module: 992 put_module:
1261 module_put(t->me); 993 module_put(t->me);
1262 up(&ipt_mutex); 994 xt_table_unlock(t);
1263 free_newinfo_counters_untrans: 995 free_newinfo_counters_untrans:
1264 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL); 996 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1265 free_newinfo_counters: 997 free_newinfo_counters:
1266 vfree(counters); 998 vfree(counters);
1267 free_newinfo: 999 free_newinfo:
1268 free_table_info(newinfo); 1000 xt_free_table_info(newinfo);
1269 return ret; 1001 return ret;
1270} 1002}
1271 1003
@@ -1273,7 +1005,7 @@ do_replace(void __user *user, unsigned int len)
1273 * and everything is OK. */ 1005 * and everything is OK. */
1274static inline int 1006static inline int
1275add_counter_to_entry(struct ipt_entry *e, 1007add_counter_to_entry(struct ipt_entry *e,
1276 const struct ipt_counters addme[], 1008 const struct xt_counters addme[],
1277 unsigned int *i) 1009 unsigned int *i)
1278{ 1010{
1279#if 0 1011#if 0
@@ -1295,15 +1027,16 @@ static int
1295do_add_counters(void __user *user, unsigned int len) 1027do_add_counters(void __user *user, unsigned int len)
1296{ 1028{
1297 unsigned int i; 1029 unsigned int i;
1298 struct ipt_counters_info tmp, *paddc; 1030 struct xt_counters_info tmp, *paddc;
1299 struct ipt_table *t; 1031 struct ipt_table *t;
1032 struct xt_table_info *private;
1300 int ret = 0; 1033 int ret = 0;
1301 void *loc_cpu_entry; 1034 void *loc_cpu_entry;
1302 1035
1303 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1036 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1304 return -EFAULT; 1037 return -EFAULT;
1305 1038
1306 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters)) 1039 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
1307 return -EINVAL; 1040 return -EINVAL;
1308 1041
1309 paddc = vmalloc_node(len, numa_node_id()); 1042 paddc = vmalloc_node(len, numa_node_id());
@@ -1315,29 +1048,30 @@ do_add_counters(void __user *user, unsigned int len)
1315 goto free; 1048 goto free;
1316 } 1049 }
1317 1050
1318 t = find_table_lock(tmp.name); 1051 t = xt_find_table_lock(AF_INET, tmp.name);
1319 if (!t || IS_ERR(t)) { 1052 if (!t || IS_ERR(t)) {
1320 ret = t ? PTR_ERR(t) : -ENOENT; 1053 ret = t ? PTR_ERR(t) : -ENOENT;
1321 goto free; 1054 goto free;
1322 } 1055 }
1323 1056
1324 write_lock_bh(&t->lock); 1057 write_lock_bh(&t->lock);
1325 if (t->private->number != paddc->num_counters) { 1058 private = t->private;
1059 if (private->number != paddc->num_counters) {
1326 ret = -EINVAL; 1060 ret = -EINVAL;
1327 goto unlock_up_free; 1061 goto unlock_up_free;
1328 } 1062 }
1329 1063
1330 i = 0; 1064 i = 0;
1331 /* Choose the copy that is on our node */ 1065 /* Choose the copy that is on our node */
1332 loc_cpu_entry = t->private->entries[raw_smp_processor_id()]; 1066 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1333 IPT_ENTRY_ITERATE(loc_cpu_entry, 1067 IPT_ENTRY_ITERATE(loc_cpu_entry,
1334 t->private->size, 1068 private->size,
1335 add_counter_to_entry, 1069 add_counter_to_entry,
1336 paddc->counters, 1070 paddc->counters,
1337 &i); 1071 &i);
1338 unlock_up_free: 1072 unlock_up_free:
1339 write_unlock_bh(&t->lock); 1073 write_unlock_bh(&t->lock);
1340 up(&ipt_mutex); 1074 xt_table_unlock(t);
1341 module_put(t->me); 1075 module_put(t->me);
1342 free: 1076 free:
1343 vfree(paddc); 1077 vfree(paddc);
@@ -1396,25 +1130,26 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1396 } 1130 }
1397 name[IPT_TABLE_MAXNAMELEN-1] = '\0'; 1131 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1398 1132
1399 t = try_then_request_module(find_table_lock(name), 1133 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1400 "iptable_%s", name); 1134 "iptable_%s", name);
1401 if (t && !IS_ERR(t)) { 1135 if (t && !IS_ERR(t)) {
1402 struct ipt_getinfo info; 1136 struct ipt_getinfo info;
1137 struct xt_table_info *private = t->private;
1403 1138
1404 info.valid_hooks = t->valid_hooks; 1139 info.valid_hooks = t->valid_hooks;
1405 memcpy(info.hook_entry, t->private->hook_entry, 1140 memcpy(info.hook_entry, private->hook_entry,
1406 sizeof(info.hook_entry)); 1141 sizeof(info.hook_entry));
1407 memcpy(info.underflow, t->private->underflow, 1142 memcpy(info.underflow, private->underflow,
1408 sizeof(info.underflow)); 1143 sizeof(info.underflow));
1409 info.num_entries = t->private->number; 1144 info.num_entries = private->number;
1410 info.size = t->private->size; 1145 info.size = private->size;
1411 memcpy(info.name, name, sizeof(info.name)); 1146 memcpy(info.name, name, sizeof(info.name));
1412 1147
1413 if (copy_to_user(user, &info, *len) != 0) 1148 if (copy_to_user(user, &info, *len) != 0)
1414 ret = -EFAULT; 1149 ret = -EFAULT;
1415 else 1150 else
1416 ret = 0; 1151 ret = 0;
1417 up(&ipt_mutex); 1152 xt_table_unlock(t);
1418 module_put(t->me); 1153 module_put(t->me);
1419 } else 1154 } else
1420 ret = t ? PTR_ERR(t) : -ENOENT; 1155 ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1441,7 +1176,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1441 case IPT_SO_GET_REVISION_MATCH: 1176 case IPT_SO_GET_REVISION_MATCH:
1442 case IPT_SO_GET_REVISION_TARGET: { 1177 case IPT_SO_GET_REVISION_TARGET: {
1443 struct ipt_get_revision rev; 1178 struct ipt_get_revision rev;
1444 int (*revfn)(const char *, u8, int *); 1179 int target;
1445 1180
1446 if (*len != sizeof(rev)) { 1181 if (*len != sizeof(rev)) {
1447 ret = -EINVAL; 1182 ret = -EINVAL;
@@ -1453,12 +1188,13 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1453 } 1188 }
1454 1189
1455 if (cmd == IPT_SO_GET_REVISION_TARGET) 1190 if (cmd == IPT_SO_GET_REVISION_TARGET)
1456 revfn = target_revfn; 1191 target = 1;
1457 else 1192 else
1458 revfn = match_revfn; 1193 target = 0;
1459 1194
1460 try_then_request_module(find_revision(rev.name, rev.revision, 1195 try_then_request_module(xt_find_revision(AF_INET, rev.name,
1461 revfn, &ret), 1196 rev.revision,
1197 target, &ret),
1462 "ipt_%s", rev.name); 1198 "ipt_%s", rev.name);
1463 break; 1199 break;
1464 } 1200 }
@@ -1471,60 +1207,15 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1471 return ret; 1207 return ret;
1472} 1208}
1473 1209
1474/* Registration hooks for targets. */ 1210int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1475int
1476ipt_register_target(struct ipt_target *target)
1477{ 1211{
1478 int ret; 1212 int ret;
1479 1213 struct xt_table_info *newinfo;
1480 ret = down_interruptible(&ipt_mutex); 1214 static struct xt_table_info bootstrap
1481 if (ret != 0)
1482 return ret;
1483 list_add(&target->list, &ipt_target);
1484 up(&ipt_mutex);
1485 return ret;
1486}
1487
1488void
1489ipt_unregister_target(struct ipt_target *target)
1490{
1491 down(&ipt_mutex);
1492 LIST_DELETE(&ipt_target, target);
1493 up(&ipt_mutex);
1494}
1495
1496int
1497ipt_register_match(struct ipt_match *match)
1498{
1499 int ret;
1500
1501 ret = down_interruptible(&ipt_mutex);
1502 if (ret != 0)
1503 return ret;
1504
1505 list_add(&match->list, &ipt_match);
1506 up(&ipt_mutex);
1507
1508 return ret;
1509}
1510
1511void
1512ipt_unregister_match(struct ipt_match *match)
1513{
1514 down(&ipt_mutex);
1515 LIST_DELETE(&ipt_match, match);
1516 up(&ipt_mutex);
1517}
1518
1519int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
1520{
1521 int ret;
1522 struct ipt_table_info *newinfo;
1523 static struct ipt_table_info bootstrap
1524 = { 0, 0, 0, { 0 }, { 0 }, { } }; 1215 = { 0, 0, 0, { 0 }, { 0 }, { } };
1525 void *loc_cpu_entry; 1216 void *loc_cpu_entry;
1526 1217
1527 newinfo = alloc_table_info(repl->size); 1218 newinfo = xt_alloc_table_info(repl->size);
1528 if (!newinfo) 1219 if (!newinfo)
1529 return -ENOMEM; 1220 return -ENOMEM;
1530 1221
@@ -1540,246 +1231,29 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
1540 repl->hook_entry, 1231 repl->hook_entry,
1541 repl->underflow); 1232 repl->underflow);
1542 if (ret != 0) { 1233 if (ret != 0) {
1543 free_table_info(newinfo); 1234 xt_free_table_info(newinfo);
1544 return ret; 1235 return ret;
1545 } 1236 }
1546 1237
1547 ret = down_interruptible(&ipt_mutex); 1238 if (xt_register_table(table, &bootstrap, newinfo) != 0) {
1548 if (ret != 0) { 1239 xt_free_table_info(newinfo);
1549 free_table_info(newinfo);
1550 return ret; 1240 return ret;
1551 } 1241 }
1552 1242
1553 /* Don't autoload: we'd eat our tail... */ 1243 return 0;
1554 if (list_named_find(&ipt_tables, table->name)) {
1555 ret = -EEXIST;
1556 goto free_unlock;
1557 }
1558
1559 /* Simplifies replace_table code. */
1560 table->private = &bootstrap;
1561 if (!replace_table(table, 0, newinfo, &ret))
1562 goto free_unlock;
1563
1564 duprintf("table->private->number = %u\n",
1565 table->private->number);
1566
1567 /* save number of initial entries */
1568 table->private->initial_entries = table->private->number;
1569
1570 rwlock_init(&table->lock);
1571 list_prepend(&ipt_tables, table);
1572
1573 unlock:
1574 up(&ipt_mutex);
1575 return ret;
1576
1577 free_unlock:
1578 free_table_info(newinfo);
1579 goto unlock;
1580} 1244}
1581 1245
1582void ipt_unregister_table(struct ipt_table *table) 1246void ipt_unregister_table(struct ipt_table *table)
1583{ 1247{
1248 struct xt_table_info *private;
1584 void *loc_cpu_entry; 1249 void *loc_cpu_entry;
1585 1250
1586 down(&ipt_mutex); 1251 private = xt_unregister_table(table);
1587 LIST_DELETE(&ipt_tables, table);
1588 up(&ipt_mutex);
1589 1252
1590 /* Decrease module usage counts and free resources */ 1253 /* Decrease module usage counts and free resources */
1591 loc_cpu_entry = table->private->entries[raw_smp_processor_id()]; 1254 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1592 IPT_ENTRY_ITERATE(loc_cpu_entry, table->private->size, 1255 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
1593 cleanup_entry, NULL); 1256 xt_free_table_info(private);
1594 free_table_info(table->private);
1595}
1596
1597/* Returns 1 if the port is matched by the range, 0 otherwise */
1598static inline int
1599port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
1600{
1601 int ret;
1602
1603 ret = (port >= min && port <= max) ^ invert;
1604 return ret;
1605}
1606
1607static int
1608tcp_find_option(u_int8_t option,
1609 const struct sk_buff *skb,
1610 unsigned int optlen,
1611 int invert,
1612 int *hotdrop)
1613{
1614 /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
1615 u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
1616 unsigned int i;
1617
1618 duprintf("tcp_match: finding option\n");
1619
1620 if (!optlen)
1621 return invert;
1622
1623 /* If we don't have the whole header, drop packet. */
1624 op = skb_header_pointer(skb,
1625 skb->nh.iph->ihl*4 + sizeof(struct tcphdr),
1626 optlen, _opt);
1627 if (op == NULL) {
1628 *hotdrop = 1;
1629 return 0;
1630 }
1631
1632 for (i = 0; i < optlen; ) {
1633 if (op[i] == option) return !invert;
1634 if (op[i] < 2) i++;
1635 else i += op[i+1]?:1;
1636 }
1637
1638 return invert;
1639}
1640
1641static int
1642tcp_match(const struct sk_buff *skb,
1643 const struct net_device *in,
1644 const struct net_device *out,
1645 const void *matchinfo,
1646 int offset,
1647 int *hotdrop)
1648{
1649 struct tcphdr _tcph, *th;
1650 const struct ipt_tcp *tcpinfo = matchinfo;
1651
1652 if (offset) {
1653 /* To quote Alan:
1654
1655 Don't allow a fragment of TCP 8 bytes in. Nobody normal
1656 causes this. Its a cracker trying to break in by doing a
1657 flag overwrite to pass the direction checks.
1658 */
1659 if (offset == 1) {
1660 duprintf("Dropping evil TCP offset=1 frag.\n");
1661 *hotdrop = 1;
1662 }
1663 /* Must not be a fragment. */
1664 return 0;
1665 }
1666
1667#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
1668
1669 th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1670 sizeof(_tcph), &_tcph);
1671 if (th == NULL) {
1672 /* We've been asked to examine this packet, and we
1673 can't. Hence, no choice but to drop. */
1674 duprintf("Dropping evil TCP offset=0 tinygram.\n");
1675 *hotdrop = 1;
1676 return 0;
1677 }
1678
1679 if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
1680 ntohs(th->source),
1681 !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT)))
1682 return 0;
1683 if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
1684 ntohs(th->dest),
1685 !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT)))
1686 return 0;
1687 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
1688 == tcpinfo->flg_cmp,
1689 IPT_TCP_INV_FLAGS))
1690 return 0;
1691 if (tcpinfo->option) {
1692 if (th->doff * 4 < sizeof(_tcph)) {
1693 *hotdrop = 1;
1694 return 0;
1695 }
1696 if (!tcp_find_option(tcpinfo->option, skb,
1697 th->doff*4 - sizeof(_tcph),
1698 tcpinfo->invflags & IPT_TCP_INV_OPTION,
1699 hotdrop))
1700 return 0;
1701 }
1702 return 1;
1703}
1704
1705/* Called when user tries to insert an entry of this type. */
1706static int
1707tcp_checkentry(const char *tablename,
1708 const struct ipt_ip *ip,
1709 void *matchinfo,
1710 unsigned int matchsize,
1711 unsigned int hook_mask)
1712{
1713 const struct ipt_tcp *tcpinfo = matchinfo;
1714
1715 /* Must specify proto == TCP, and no unknown invflags */
1716 return ip->proto == IPPROTO_TCP
1717 && !(ip->invflags & IPT_INV_PROTO)
1718 && matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
1719 && !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
1720}
1721
1722static int
1723udp_match(const struct sk_buff *skb,
1724 const struct net_device *in,
1725 const struct net_device *out,
1726 const void *matchinfo,
1727 int offset,
1728 int *hotdrop)
1729{
1730 struct udphdr _udph, *uh;
1731 const struct ipt_udp *udpinfo = matchinfo;
1732
1733 /* Must not be a fragment. */
1734 if (offset)
1735 return 0;
1736
1737 uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
1738 sizeof(_udph), &_udph);
1739 if (uh == NULL) {
1740 /* We've been asked to examine this packet, and we
1741 can't. Hence, no choice but to drop. */
1742 duprintf("Dropping evil UDP tinygram.\n");
1743 *hotdrop = 1;
1744 return 0;
1745 }
1746
1747 return port_match(udpinfo->spts[0], udpinfo->spts[1],
1748 ntohs(uh->source),
1749 !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
1750 && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
1751 ntohs(uh->dest),
1752 !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
1753}
1754
1755/* Called when user tries to insert an entry of this type. */
1756static int
1757udp_checkentry(const char *tablename,
1758 const struct ipt_ip *ip,
1759 void *matchinfo,
1760 unsigned int matchinfosize,
1761 unsigned int hook_mask)
1762{
1763 const struct ipt_udp *udpinfo = matchinfo;
1764
1765 /* Must specify proto == UDP, and no unknown invflags */
1766 if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
1767 duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
1768 IPPROTO_UDP);
1769 return 0;
1770 }
1771 if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
1772 duprintf("ipt_udp: matchsize %u != %u\n",
1773 matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
1774 return 0;
1775 }
1776 if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
1777 duprintf("ipt_udp: unknown flags %X\n",
1778 udpinfo->invflags);
1779 return 0;
1780 }
1781
1782 return 1;
1783} 1257}
1784 1258
1785/* Returns 1 if the type and code is matched by the range, 0 otherwise */ 1259/* Returns 1 if the type and code is matched by the range, 0 otherwise */
@@ -1798,6 +1272,7 @@ icmp_match(const struct sk_buff *skb,
1798 const struct net_device *out, 1272 const struct net_device *out,
1799 const void *matchinfo, 1273 const void *matchinfo,
1800 int offset, 1274 int offset,
1275 unsigned int protoff,
1801 int *hotdrop) 1276 int *hotdrop)
1802{ 1277{
1803 struct icmphdr _icmph, *ic; 1278 struct icmphdr _icmph, *ic;
@@ -1807,8 +1282,7 @@ icmp_match(const struct sk_buff *skb,
1807 if (offset) 1282 if (offset)
1808 return 0; 1283 return 0;
1809 1284
1810 ic = skb_header_pointer(skb, skb->nh.iph->ihl*4, 1285 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
1811 sizeof(_icmph), &_icmph);
1812 if (ic == NULL) { 1286 if (ic == NULL) {
1813 /* We've been asked to examine this packet, and we 1287 /* We've been asked to examine this packet, and we
1814 * can't. Hence, no choice but to drop. 1288 * can't. Hence, no choice but to drop.
@@ -1828,11 +1302,12 @@ icmp_match(const struct sk_buff *skb,
1828/* Called when user tries to insert an entry of this type. */ 1302/* Called when user tries to insert an entry of this type. */
1829static int 1303static int
1830icmp_checkentry(const char *tablename, 1304icmp_checkentry(const char *tablename,
1831 const struct ipt_ip *ip, 1305 const void *info,
1832 void *matchinfo, 1306 void *matchinfo,
1833 unsigned int matchsize, 1307 unsigned int matchsize,
1834 unsigned int hook_mask) 1308 unsigned int hook_mask)
1835{ 1309{
1310 const struct ipt_ip *ip = info;
1836 const struct ipt_icmp *icmpinfo = matchinfo; 1311 const struct ipt_icmp *icmpinfo = matchinfo;
1837 1312
1838 /* Must specify proto == ICMP, and no unknown invflags */ 1313 /* Must specify proto == ICMP, and no unknown invflags */
@@ -1862,123 +1337,22 @@ static struct nf_sockopt_ops ipt_sockopts = {
1862 .get = do_ipt_get_ctl, 1337 .get = do_ipt_get_ctl,
1863}; 1338};
1864 1339
1865static struct ipt_match tcp_matchstruct = {
1866 .name = "tcp",
1867 .match = &tcp_match,
1868 .checkentry = &tcp_checkentry,
1869};
1870
1871static struct ipt_match udp_matchstruct = {
1872 .name = "udp",
1873 .match = &udp_match,
1874 .checkentry = &udp_checkentry,
1875};
1876
1877static struct ipt_match icmp_matchstruct = { 1340static struct ipt_match icmp_matchstruct = {
1878 .name = "icmp", 1341 .name = "icmp",
1879 .match = &icmp_match, 1342 .match = &icmp_match,
1880 .checkentry = &icmp_checkentry, 1343 .checkentry = &icmp_checkentry,
1881}; 1344};
1882 1345
1883#ifdef CONFIG_PROC_FS
1884static inline int print_name(const char *i,
1885 off_t start_offset, char *buffer, int length,
1886 off_t *pos, unsigned int *count)
1887{
1888 if ((*count)++ >= start_offset) {
1889 unsigned int namelen;
1890
1891 namelen = sprintf(buffer + *pos, "%s\n",
1892 i + sizeof(struct list_head));
1893 if (*pos + namelen > length) {
1894 /* Stop iterating */
1895 return 1;
1896 }
1897 *pos += namelen;
1898 }
1899 return 0;
1900}
1901
1902static inline int print_target(const struct ipt_target *t,
1903 off_t start_offset, char *buffer, int length,
1904 off_t *pos, unsigned int *count)
1905{
1906 if (t == &ipt_standard_target || t == &ipt_error_target)
1907 return 0;
1908 return print_name((char *)t, start_offset, buffer, length, pos, count);
1909}
1910
1911static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
1912{
1913 off_t pos = 0;
1914 unsigned int count = 0;
1915
1916 if (down_interruptible(&ipt_mutex) != 0)
1917 return 0;
1918
1919 LIST_FIND(&ipt_tables, print_name, void *,
1920 offset, buffer, length, &pos, &count);
1921
1922 up(&ipt_mutex);
1923
1924 /* `start' hack - see fs/proc/generic.c line ~105 */
1925 *start=(char *)((unsigned long)count-offset);
1926 return pos;
1927}
1928
1929static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
1930{
1931 off_t pos = 0;
1932 unsigned int count = 0;
1933
1934 if (down_interruptible(&ipt_mutex) != 0)
1935 return 0;
1936
1937 LIST_FIND(&ipt_target, print_target, struct ipt_target *,
1938 offset, buffer, length, &pos, &count);
1939
1940 up(&ipt_mutex);
1941
1942 *start = (char *)((unsigned long)count - offset);
1943 return pos;
1944}
1945
1946static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
1947{
1948 off_t pos = 0;
1949 unsigned int count = 0;
1950
1951 if (down_interruptible(&ipt_mutex) != 0)
1952 return 0;
1953
1954 LIST_FIND(&ipt_match, print_name, void *,
1955 offset, buffer, length, &pos, &count);
1956
1957 up(&ipt_mutex);
1958
1959 *start = (char *)((unsigned long)count - offset);
1960 return pos;
1961}
1962
1963static const struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
1964{ { "ip_tables_names", ipt_get_tables },
1965 { "ip_tables_targets", ipt_get_targets },
1966 { "ip_tables_matches", ipt_get_matches },
1967 { NULL, NULL} };
1968#endif /*CONFIG_PROC_FS*/
1969
1970static int __init init(void) 1346static int __init init(void)
1971{ 1347{
1972 int ret; 1348 int ret;
1973 1349
1350 xt_proto_init(AF_INET);
1351
1974 /* Noone else will be downing sem now, so we won't sleep */ 1352 /* Noone else will be downing sem now, so we won't sleep */
1975 down(&ipt_mutex); 1353 xt_register_target(AF_INET, &ipt_standard_target);
1976 list_append(&ipt_target, &ipt_standard_target); 1354 xt_register_target(AF_INET, &ipt_error_target);
1977 list_append(&ipt_target, &ipt_error_target); 1355 xt_register_match(AF_INET, &icmp_matchstruct);
1978 list_append(&ipt_match, &tcp_matchstruct);
1979 list_append(&ipt_match, &udp_matchstruct);
1980 list_append(&ipt_match, &icmp_matchstruct);
1981 up(&ipt_mutex);
1982 1356
1983 /* Register setsockopt */ 1357 /* Register setsockopt */
1984 ret = nf_register_sockopt(&ipt_sockopts); 1358 ret = nf_register_sockopt(&ipt_sockopts);
@@ -1987,49 +1361,23 @@ static int __init init(void)
1987 return ret; 1361 return ret;
1988 } 1362 }
1989 1363
1990#ifdef CONFIG_PROC_FS 1364 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1991 {
1992 struct proc_dir_entry *proc;
1993 int i;
1994
1995 for (i = 0; ipt_proc_entry[i].name; i++) {
1996 proc = proc_net_create(ipt_proc_entry[i].name, 0,
1997 ipt_proc_entry[i].get_info);
1998 if (!proc) {
1999 while (--i >= 0)
2000 proc_net_remove(ipt_proc_entry[i].name);
2001 nf_unregister_sockopt(&ipt_sockopts);
2002 return -ENOMEM;
2003 }
2004 proc->owner = THIS_MODULE;
2005 }
2006 }
2007#endif
2008
2009 printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
2010 return 0; 1365 return 0;
2011} 1366}
2012 1367
2013static void __exit fini(void) 1368static void __exit fini(void)
2014{ 1369{
2015 nf_unregister_sockopt(&ipt_sockopts); 1370 nf_unregister_sockopt(&ipt_sockopts);
2016#ifdef CONFIG_PROC_FS 1371
2017 { 1372 xt_unregister_match(AF_INET, &icmp_matchstruct);
2018 int i; 1373 xt_unregister_target(AF_INET, &ipt_error_target);
2019 for (i = 0; ipt_proc_entry[i].name; i++) 1374 xt_unregister_target(AF_INET, &ipt_standard_target);
2020 proc_net_remove(ipt_proc_entry[i].name); 1375
2021 } 1376 xt_proto_fini(AF_INET);
2022#endif
2023} 1377}
2024 1378
2025EXPORT_SYMBOL(ipt_register_table); 1379EXPORT_SYMBOL(ipt_register_table);
2026EXPORT_SYMBOL(ipt_unregister_table); 1380EXPORT_SYMBOL(ipt_unregister_table);
2027EXPORT_SYMBOL(ipt_register_match);
2028EXPORT_SYMBOL(ipt_unregister_match);
2029EXPORT_SYMBOL(ipt_do_table); 1381EXPORT_SYMBOL(ipt_do_table);
2030EXPORT_SYMBOL(ipt_register_target);
2031EXPORT_SYMBOL(ipt_unregister_target);
2032EXPORT_SYMBOL(ipt_find_target);
2033
2034module_init(init); 1382module_init(init);
2035module_exit(fini); 1383module_exit(fini);