author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-12 16:31:22 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-12 16:31:22 -0400
commit | e1bd2ac5a6b7a8b625e40c9e9f8b6dea4cf22f85 (patch)
tree | 9366e9fb481da2c7195ca3f2bafeffebbf001363 /net/ipv4
parent | 0b9062f6b57a87f22309c6b920a51aaa66ce2a13 (diff)
parent | 15028aad00ddf241581fbe74a02ec89cbb28d35d (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (183 commits)
[TG3]: Update version to 3.78.
[TG3]: Add missing NVRAM strapping.
[TG3]: Enable auto MDI.
[TG3]: Fix the polarity bit.
[TG3]: Fix irq_sync race condition.
[NET_SCHED]: ematch: module autoloading
[TCP]: tcp probe wraparound handling and other changes
[RTNETLINK]: rtnl_link: allow specifying initial device address
[RTNETLINK]: rtnl_link API simplification
[VLAN]: Fix MAC address handling
[ETH]: Validate address in eth_mac_addr
[NET]: Fix races in net_rx_action vs netpoll.
[AF_UNIX]: Rewrite garbage collector, fixes race.
[NETFILTER]: {ip,nf}_conntrack_sctp: fix remotely triggerable NULL ptr dereference (CVE-2007-2876)
[NET]: Make all initialized struct seq_operations const.
[UDP]: Fix length check.
[IPV6]: Remove unneeded pointer idev from addrconf_cleanup().
[DECNET]: Another unnecessary net/tcp.h inclusion in net/dn.h
[IPV6]: Make IPV6_{RECV,2292}RTHDR boolean options.
[IPV6]: Do not send RH0 anymore.
...
Fixed up trivial conflict in Documentation/feature-removal-schedule.txt
manually.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net/ipv4')
65 files changed, 1113 insertions, 2170 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 010fbb2d45e9..fb7909774254 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -116,48 +116,6 @@ config IP_ROUTE_MULTIPATH | |||
116 | equal "cost" and chooses one of them in a non-deterministic fashion | 116 | equal "cost" and chooses one of them in a non-deterministic fashion |
117 | if a matching packet arrives. | 117 | if a matching packet arrives. |
118 | 118 | ||
119 | config IP_ROUTE_MULTIPATH_CACHED | ||
120 | bool "IP: equal cost multipath with caching support (EXPERIMENTAL)" | ||
121 | depends on IP_ROUTE_MULTIPATH | ||
122 | help | ||
123 | Normally, equal cost multipath routing is not supported by the | ||
124 | routing cache. If you say Y here, alternative routes are cached | ||
125 | and on cache lookup a route is chosen in a configurable fashion. | ||
126 | |||
127 | If unsure, say N. | ||
128 | |||
129 | config IP_ROUTE_MULTIPATH_RR | ||
130 | tristate "MULTIPATH: round robin algorithm" | ||
131 | depends on IP_ROUTE_MULTIPATH_CACHED | ||
132 | help | ||
133 | Multipath routes are chosen according to Round Robin | ||
134 | |||
135 | config IP_ROUTE_MULTIPATH_RANDOM | ||
136 | tristate "MULTIPATH: random algorithm" | ||
137 | depends on IP_ROUTE_MULTIPATH_CACHED | ||
138 | help | ||
139 | Multipath routes are chosen in a random fashion. Actually, | ||
140 | there is no weight for a route. The advantage of this policy | ||
141 | is that it is implemented stateless and therefore introduces only | ||
142 | a very small delay. | ||
143 | |||
144 | config IP_ROUTE_MULTIPATH_WRANDOM | ||
145 | tristate "MULTIPATH: weighted random algorithm" | ||
146 | depends on IP_ROUTE_MULTIPATH_CACHED | ||
147 | help | ||
148 | Multipath routes are chosen in a weighted random fashion. | ||
149 | The per route weights are the weights visible via ip route 2. As the | ||
150 | corresponding state management introduces some overhead routing delay | ||
151 | is increased. | ||
152 | |||
153 | config IP_ROUTE_MULTIPATH_DRR | ||
154 | tristate "MULTIPATH: interface round robin algorithm" | ||
155 | depends on IP_ROUTE_MULTIPATH_CACHED | ||
156 | help | ||
157 | Connections are distributed in a round robin fashion over the | ||
158 | available interfaces. This policy makes sense if the connections | ||
159 | should be primarily distributed on interfaces and not on routes. | ||
160 | |||
161 | config IP_ROUTE_VERBOSE | 119 | config IP_ROUTE_VERBOSE |
162 | bool "IP: verbose route monitoring" | 120 | bool "IP: verbose route monitoring" |
163 | depends on IP_ADVANCED_ROUTER | 121 | depends on IP_ADVANCED_ROUTER |
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4ff6c151d7f3..fbf1674e0c2a 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -29,14 +29,9 @@ obj-$(CONFIG_INET_TUNNEL) += tunnel4.o | |||
29 | obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o | 29 | obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o |
30 | obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o | 30 | obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o |
31 | obj-$(CONFIG_IP_PNP) += ipconfig.o | 31 | obj-$(CONFIG_IP_PNP) += ipconfig.o |
32 | obj-$(CONFIG_IP_ROUTE_MULTIPATH_RR) += multipath_rr.o | ||
33 | obj-$(CONFIG_IP_ROUTE_MULTIPATH_RANDOM) += multipath_random.o | ||
34 | obj-$(CONFIG_IP_ROUTE_MULTIPATH_WRANDOM) += multipath_wrandom.o | ||
35 | obj-$(CONFIG_IP_ROUTE_MULTIPATH_DRR) += multipath_drr.o | ||
36 | obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/ | 32 | obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/ |
37 | obj-$(CONFIG_IP_VS) += ipvs/ | 33 | obj-$(CONFIG_IP_VS) += ipvs/ |
38 | obj-$(CONFIG_INET_DIAG) += inet_diag.o | 34 | obj-$(CONFIG_INET_DIAG) += inet_diag.o |
39 | obj-$(CONFIG_IP_ROUTE_MULTIPATH_CACHED) += multipath.o | ||
40 | obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o | 35 | obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o |
41 | obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o | 36 | obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o |
42 | obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o | 37 | obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 041fba3fa0aa..06c08e5740fb 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1170,6 +1170,9 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1170 | int ihl; | 1170 | int ihl; |
1171 | int id; | 1171 | int id; |
1172 | 1172 | ||
1173 | if (!(features & NETIF_F_V4_CSUM)) | ||
1174 | features &= ~NETIF_F_SG; | ||
1175 | |||
1173 | if (unlikely(skb_shinfo(skb)->gso_type & | 1176 | if (unlikely(skb_shinfo(skb)->gso_type & |
1174 | ~(SKB_GSO_TCPV4 | | 1177 | ~(SKB_GSO_TCPV4 | |
1175 | SKB_GSO_UDP | | 1178 | SKB_GSO_UDP | |
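For context: the three added lines make inet_gso_segment() clear scatter-gather when the device lacks IPv4 checksum offload, since SG transmit generally depends on the hardware being able to checksum the result. A standalone sketch of that dependency, using stand-in flag values rather than the kernel's real NETIF_F_* bits:

```c
#include <stdio.h>

/* Stand-in bits for illustration only -- not the kernel's NETIF_F_* values. */
#define NETIF_F_SG       0x1
#define NETIF_F_V4_CSUM  0x2

/* Mirror of the added check: without IPv4 checksum offload,
 * scatter-gather must be dropped as well before segmenting. */
static int gso_adjust_features(int features)
{
	if (!(features & NETIF_F_V4_CSUM))
		features &= ~NETIF_F_SG;
	return features;
}

int main(void)
{
	printf("%#x\n", gso_adjust_features(NETIF_F_SG));                   /* 0   */
	printf("%#x\n", gso_adjust_features(NETIF_F_SG | NETIF_F_V4_CSUM)); /* 0x3 */
	return 0;
}
```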
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 6da8ff597ad3..7a23e59c374a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -339,3 +339,4 @@ static void __exit ah4_fini(void) | |||
339 | module_init(ah4_init); | 339 | module_init(ah4_init); |
340 | module_exit(ah4_fini); | 340 | module_exit(ah4_fini); |
341 | MODULE_LICENSE("GPL"); | 341 | MODULE_LICENSE("GPL"); |
342 | MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH); | ||
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 47c95e8ef045..98767a4f1185 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -481,3 +481,4 @@ static void __exit esp4_fini(void) | |||
481 | module_init(esp4_init); | 481 | module_init(esp4_init); |
482 | module_exit(esp4_fini); | 482 | module_exit(esp4_fini); |
483 | MODULE_LICENSE("GPL"); | 483 | MODULE_LICENSE("GPL"); |
484 | MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP); | ||
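The MODULE_ALIAS_XFRM_TYPE() lines added here and in ah4.c/ipcomp.c let the xfrm core autoload the right transform module: the macro records an alias of roughly the form "xfrm-type-<family>-<protocol>", which matches the string the core hands to request_module() when a state for that (family, protocol) pair is installed. A small userspace sketch of the alias string, with stand-in constant values:

```c
#include <stdio.h>

/* Stand-in numeric values for illustration (AF_INET is 2, ESP is IP protocol 50). */
#define AF_INET_VAL     2
#define IPPROTO_ESP_VAL 50

int main(void)
{
	char alias[32];

	/* Roughly what MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP) registers,
	 * and what the xfrm core would ask modprobe to resolve. */
	snprintf(alias, sizeof(alias), "xfrm-type-%d-%d", AF_INET_VAL, IPPROTO_ESP_VAL);
	printf("%s\n", alias);   /* xfrm-type-2-50 */
	return 0;
}
```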
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 311d633f7f39..2eb909be8041 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -453,7 +453,6 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = { | |||
453 | [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, | 453 | [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, |
454 | [RTA_PROTOINFO] = { .type = NLA_U32 }, | 454 | [RTA_PROTOINFO] = { .type = NLA_U32 }, |
455 | [RTA_FLOW] = { .type = NLA_U32 }, | 455 | [RTA_FLOW] = { .type = NLA_U32 }, |
456 | [RTA_MP_ALGO] = { .type = NLA_U32 }, | ||
457 | }; | 456 | }; |
458 | 457 | ||
459 | static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh, | 458 | static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh, |
@@ -515,9 +514,6 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
515 | case RTA_FLOW: | 514 | case RTA_FLOW: |
516 | cfg->fc_flow = nla_get_u32(attr); | 515 | cfg->fc_flow = nla_get_u32(attr); |
517 | break; | 516 | break; |
518 | case RTA_MP_ALGO: | ||
519 | cfg->fc_mp_alg = nla_get_u32(attr); | ||
520 | break; | ||
521 | case RTA_TABLE: | 517 | case RTA_TABLE: |
522 | cfg->fc_table = nla_get_u32(attr); | 518 | cfg->fc_table = nla_get_u32(attr); |
523 | break; | 519 | break; |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index bb94550d95c3..c434119deb52 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -42,7 +42,6 @@ | |||
42 | #include <net/tcp.h> | 42 | #include <net/tcp.h> |
43 | #include <net/sock.h> | 43 | #include <net/sock.h> |
44 | #include <net/ip_fib.h> | 44 | #include <net/ip_fib.h> |
45 | #include <net/ip_mp_alg.h> | ||
46 | #include <net/netlink.h> | 45 | #include <net/netlink.h> |
47 | #include <net/nexthop.h> | 46 | #include <net/nexthop.h> |
48 | 47 | ||
@@ -697,13 +696,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
697 | goto err_inval; | 696 | goto err_inval; |
698 | } | 697 | } |
699 | #endif | 698 | #endif |
700 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
701 | if (cfg->fc_mp_alg) { | ||
702 | if (cfg->fc_mp_alg < IP_MP_ALG_NONE || | ||
703 | cfg->fc_mp_alg > IP_MP_ALG_MAX) | ||
704 | goto err_inval; | ||
705 | } | ||
706 | #endif | ||
707 | 699 | ||
708 | err = -ENOBUFS; | 700 | err = -ENOBUFS; |
709 | if (fib_info_cnt >= fib_hash_size) { | 701 | if (fib_info_cnt >= fib_hash_size) { |
@@ -791,10 +783,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
791 | #endif | 783 | #endif |
792 | } | 784 | } |
793 | 785 | ||
794 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
795 | fi->fib_mp_alg = cfg->fc_mp_alg; | ||
796 | #endif | ||
797 | |||
798 | if (fib_props[cfg->fc_type].error) { | 786 | if (fib_props[cfg->fc_type].error) { |
799 | if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) | 787 | if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) |
800 | goto err_inval; | 788 | goto err_inval; |
@@ -940,10 +928,6 @@ out_fill_res: | |||
940 | res->type = fa->fa_type; | 928 | res->type = fa->fa_type; |
941 | res->scope = fa->fa_scope; | 929 | res->scope = fa->fa_scope; |
942 | res->fi = fa->fa_info; | 930 | res->fi = fa->fa_info; |
943 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
944 | res->netmask = mask; | ||
945 | res->network = zone & inet_make_mask(prefixlen); | ||
946 | #endif | ||
947 | atomic_inc(&res->fi->fib_clntref); | 931 | atomic_inc(&res->fi->fib_clntref); |
948 | return 0; | 932 | return 0; |
949 | } | 933 | } |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 63282934725e..5c14ed63e56c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -809,7 +809,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
809 | 809 | ||
810 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; | 810 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; |
811 | 811 | ||
812 | if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { | 812 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| |
813 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { | ||
813 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 814 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
814 | if (!new_skb) { | 815 | if (!new_skb) { |
815 | ip_rt_put(rt); | 816 | ip_rt_put(rt); |
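This hunk (and the matching one in ipip.c below) relaxes the copy condition: a cloned skb whose header area is still writable no longer forces skb_realloc_headroom(). A hypothetical helper, not part of the patch, that captures the shared test:

```c
#include <linux/skbuff.h>

/* Hypothetical helper (not from the patch): reuse the skb in place unless the
 * headroom is too small, the skb is shared, or it is a clone whose header
 * cannot be written without copying. */
static struct sk_buff *tunnel_prep_skb(struct sk_buff *skb, unsigned int max_headroom)
{
	if (skb_headroom(skb) >= max_headroom && !skb_shared(skb) &&
	    (!skb_cloned(skb) || skb_clone_writable(skb, 0)))
		return skb;				/* safe to reuse */

	return skb_realloc_headroom(skb, max_headroom);	/* NULL on allocation failure */
}
```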
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 34ea4547ebbe..c9e2b5e6305e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -399,6 +399,10 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
399 | to->tc_index = from->tc_index; | 399 | to->tc_index = from->tc_index; |
400 | #endif | 400 | #endif |
401 | nf_copy(to, from); | 401 | nf_copy(to, from); |
402 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | ||
403 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | ||
404 | to->nf_trace = from->nf_trace; | ||
405 | #endif | ||
402 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | 406 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) |
403 | to->ipvs_property = from->ipvs_property; | 407 | to->ipvs_property = from->ipvs_property; |
404 | #endif | 408 | #endif |
@@ -837,7 +841,7 @@ int ip_append_data(struct sock *sk, | |||
837 | */ | 841 | */ |
838 | if (transhdrlen && | 842 | if (transhdrlen && |
839 | length + fragheaderlen <= mtu && | 843 | length + fragheaderlen <= mtu && |
840 | rt->u.dst.dev->features & NETIF_F_ALL_CSUM && | 844 | rt->u.dst.dev->features & NETIF_F_V4_CSUM && |
841 | !exthdrlen) | 845 | !exthdrlen) |
842 | csummode = CHECKSUM_PARTIAL; | 846 | csummode = CHECKSUM_PARTIAL; |
843 | 847 | ||
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index ab86137c71d2..e787044a8514 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -485,3 +485,4 @@ MODULE_LICENSE("GPL"); | |||
485 | MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173"); | 485 | MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173"); |
486 | MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); | 486 | MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); |
487 | 487 | ||
488 | MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP); | ||
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ebd2f2d532f6..396437242a1b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -595,7 +595,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
595 | */ | 595 | */ |
596 | max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr)); | 596 | max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr)); |
597 | 597 | ||
598 | if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { | 598 | if (skb_headroom(skb) < max_headroom || skb_shared(skb) || |
599 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { | ||
599 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 600 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
600 | if (!new_skb) { | 601 | if (!new_skb) { |
601 | ip_rt_put(rt); | 602 | ip_rt_put(rt); |
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 15ad5dd2d984..8d6901d4e94f 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -549,7 +549,7 @@ static int ip_vs_app_seq_show(struct seq_file *seq, void *v) | |||
549 | return 0; | 549 | return 0; |
550 | } | 550 | } |
551 | 551 | ||
552 | static struct seq_operations ip_vs_app_seq_ops = { | 552 | static const struct seq_operations ip_vs_app_seq_ops = { |
553 | .start = ip_vs_app_seq_start, | 553 | .start = ip_vs_app_seq_start, |
554 | .next = ip_vs_app_seq_next, | 554 | .next = ip_vs_app_seq_next, |
555 | .stop = ip_vs_app_seq_stop, | 555 | .stop = ip_vs_app_seq_stop, |
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 7018f97c75dc..3b446b1a6b9c 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -745,7 +745,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) | |||
745 | return 0; | 745 | return 0; |
746 | } | 746 | } |
747 | 747 | ||
748 | static struct seq_operations ip_vs_conn_seq_ops = { | 748 | static const struct seq_operations ip_vs_conn_seq_ops = { |
749 | .start = ip_vs_conn_seq_start, | 749 | .start = ip_vs_conn_seq_start, |
750 | .next = ip_vs_conn_seq_next, | 750 | .next = ip_vs_conn_seq_next, |
751 | .stop = ip_vs_conn_seq_stop, | 751 | .stop = ip_vs_conn_seq_stop, |
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 68fe1d4d0210..e1052bcf4ed1 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -1783,7 +1783,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) | |||
1783 | return 0; | 1783 | return 0; |
1784 | } | 1784 | } |
1785 | 1785 | ||
1786 | static struct seq_operations ip_vs_info_seq_ops = { | 1786 | static const struct seq_operations ip_vs_info_seq_ops = { |
1787 | .start = ip_vs_info_seq_start, | 1787 | .start = ip_vs_info_seq_start, |
1788 | .next = ip_vs_info_seq_next, | 1788 | .next = ip_vs_info_seq_next, |
1789 | .stop = ip_vs_info_seq_stop, | 1789 | .stop = ip_vs_info_seq_stop, |
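The three ipvs hunks above follow the tree-wide "Make all initialized struct seq_operations const" change from the shortlog. A minimal sketch of the resulting idiom, assuming the usual seq_file boilerplate (handler bodies are stubs and the proc file_operations wiring is omitted):

```c
#include <linux/fs.h>
#include <linux/seq_file.h>

static void *example_seq_start(struct seq_file *s, loff_t *pos)
{
	return NULL;			/* stub: no records */
}

static void *example_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void example_seq_stop(struct seq_file *s, void *v)
{
}

static int example_seq_show(struct seq_file *s, void *v)
{
	return 0;
}

/* The table is only read after initialisation, so it can be const and
 * live in read-only data; seq_open() accepts a pointer to const. */
static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};

static int example_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &example_seq_ops);
}
```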
diff --git a/net/ipv4/multipath.c b/net/ipv4/multipath.c
deleted file mode 100644
index 4e9ca7c76407..000000000000
--- a/net/ipv4/multipath.c
+++ /dev/null
@@ -1,55 +0,0 @@ | |||
1 | /* multipath.c: IPV4 multipath algorithm support. | ||
2 | * | ||
3 | * Copyright (C) 2004, 2005 Einar Lueck <elueck@de.ibm.com> | ||
4 | * Copyright (C) 2005 David S. Miller <davem@davemloft.net> | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/netdevice.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | |||
12 | #include <net/ip_mp_alg.h> | ||
13 | |||
14 | static DEFINE_SPINLOCK(alg_table_lock); | ||
15 | struct ip_mp_alg_ops *ip_mp_alg_table[IP_MP_ALG_MAX + 1]; | ||
16 | |||
17 | int multipath_alg_register(struct ip_mp_alg_ops *ops, enum ip_mp_alg n) | ||
18 | { | ||
19 | struct ip_mp_alg_ops **slot; | ||
20 | int err; | ||
21 | |||
22 | if (n < IP_MP_ALG_NONE || n > IP_MP_ALG_MAX || | ||
23 | !ops->mp_alg_select_route) | ||
24 | return -EINVAL; | ||
25 | |||
26 | spin_lock(&alg_table_lock); | ||
27 | slot = &ip_mp_alg_table[n]; | ||
28 | if (*slot != NULL) { | ||
29 | err = -EBUSY; | ||
30 | } else { | ||
31 | *slot = ops; | ||
32 | err = 0; | ||
33 | } | ||
34 | spin_unlock(&alg_table_lock); | ||
35 | |||
36 | return err; | ||
37 | } | ||
38 | EXPORT_SYMBOL(multipath_alg_register); | ||
39 | |||
40 | void multipath_alg_unregister(struct ip_mp_alg_ops *ops, enum ip_mp_alg n) | ||
41 | { | ||
42 | struct ip_mp_alg_ops **slot; | ||
43 | |||
44 | if (n < IP_MP_ALG_NONE || n > IP_MP_ALG_MAX) | ||
45 | return; | ||
46 | |||
47 | spin_lock(&alg_table_lock); | ||
48 | slot = &ip_mp_alg_table[n]; | ||
49 | if (*slot == ops) | ||
50 | *slot = NULL; | ||
51 | spin_unlock(&alg_table_lock); | ||
52 | |||
53 | synchronize_net(); | ||
54 | } | ||
55 | EXPORT_SYMBOL(multipath_alg_unregister); | ||
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c
deleted file mode 100644
index b03c5ca2c823..000000000000
--- a/net/ipv4/multipath_drr.c
+++ /dev/null
@@ -1,249 +0,0 @@ | |||
1 | /* | ||
2 | * Device round robin policy for multipath. | ||
3 | * | ||
4 | * | ||
5 | * Version: $Id: multipath_drr.c,v 1.1.2.1 2004/09/16 07:42:34 elueck Exp $ | ||
6 | * | ||
7 | * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <asm/system.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/timer.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/fcntl.h> | ||
23 | #include <linux/stat.h> | ||
24 | #include <linux/socket.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/inet.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/inetdevice.h> | ||
29 | #include <linux/igmp.h> | ||
30 | #include <linux/proc_fs.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/mroute.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <net/ip.h> | ||
36 | #include <net/protocol.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | #include <net/sock.h> | ||
39 | #include <net/icmp.h> | ||
40 | #include <net/udp.h> | ||
41 | #include <net/raw.h> | ||
42 | #include <linux/notifier.h> | ||
43 | #include <linux/if_arp.h> | ||
44 | #include <linux/netfilter_ipv4.h> | ||
45 | #include <net/ipip.h> | ||
46 | #include <net/checksum.h> | ||
47 | #include <net/ip_mp_alg.h> | ||
48 | |||
49 | struct multipath_device { | ||
50 | int ifi; /* interface index of device */ | ||
51 | atomic_t usecount; | ||
52 | int allocated; | ||
53 | }; | ||
54 | |||
55 | #define MULTIPATH_MAX_DEVICECANDIDATES 10 | ||
56 | |||
57 | static struct multipath_device state[MULTIPATH_MAX_DEVICECANDIDATES]; | ||
58 | static DEFINE_SPINLOCK(state_lock); | ||
59 | |||
60 | static int inline __multipath_findslot(void) | ||
61 | { | ||
62 | int i; | ||
63 | |||
64 | for (i = 0; i < MULTIPATH_MAX_DEVICECANDIDATES; i++) { | ||
65 | if (state[i].allocated == 0) | ||
66 | return i; | ||
67 | } | ||
68 | return -1; | ||
69 | } | ||
70 | |||
71 | static int inline __multipath_finddev(int ifindex) | ||
72 | { | ||
73 | int i; | ||
74 | |||
75 | for (i = 0; i < MULTIPATH_MAX_DEVICECANDIDATES; i++) { | ||
76 | if (state[i].allocated != 0 && | ||
77 | state[i].ifi == ifindex) | ||
78 | return i; | ||
79 | } | ||
80 | return -1; | ||
81 | } | ||
82 | |||
83 | static int drr_dev_event(struct notifier_block *this, | ||
84 | unsigned long event, void *ptr) | ||
85 | { | ||
86 | struct net_device *dev = ptr; | ||
87 | int devidx; | ||
88 | |||
89 | switch (event) { | ||
90 | case NETDEV_UNREGISTER: | ||
91 | case NETDEV_DOWN: | ||
92 | spin_lock_bh(&state_lock); | ||
93 | |||
94 | devidx = __multipath_finddev(dev->ifindex); | ||
95 | if (devidx != -1) { | ||
96 | state[devidx].allocated = 0; | ||
97 | state[devidx].ifi = 0; | ||
98 | atomic_set(&state[devidx].usecount, 0); | ||
99 | } | ||
100 | |||
101 | spin_unlock_bh(&state_lock); | ||
102 | break; | ||
103 | } | ||
104 | |||
105 | return NOTIFY_DONE; | ||
106 | } | ||
107 | |||
108 | static struct notifier_block drr_dev_notifier = { | ||
109 | .notifier_call = drr_dev_event, | ||
110 | }; | ||
111 | |||
112 | |||
113 | static void drr_safe_inc(atomic_t *usecount) | ||
114 | { | ||
115 | int n; | ||
116 | |||
117 | atomic_inc(usecount); | ||
118 | |||
119 | n = atomic_read(usecount); | ||
120 | if (n <= 0) { | ||
121 | int i; | ||
122 | |||
123 | spin_lock_bh(&state_lock); | ||
124 | |||
125 | for (i = 0; i < MULTIPATH_MAX_DEVICECANDIDATES; i++) | ||
126 | atomic_set(&state[i].usecount, 0); | ||
127 | |||
128 | spin_unlock_bh(&state_lock); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | static void drr_select_route(const struct flowi *flp, | ||
133 | struct rtable *first, struct rtable **rp) | ||
134 | { | ||
135 | struct rtable *nh, *result, *cur_min; | ||
136 | int min_usecount = -1; | ||
137 | int devidx = -1; | ||
138 | int cur_min_devidx = -1; | ||
139 | |||
140 | /* 1. make sure all alt. nexthops have the same GC related data */ | ||
141 | /* 2. determine the new candidate to be returned */ | ||
142 | result = NULL; | ||
143 | cur_min = NULL; | ||
144 | for (nh = rcu_dereference(first); nh; | ||
145 | nh = rcu_dereference(nh->u.dst.rt_next)) { | ||
146 | if ((nh->u.dst.flags & DST_BALANCED) != 0 && | ||
147 | multipath_comparekeys(&nh->fl, flp)) { | ||
148 | int nh_ifidx = nh->u.dst.dev->ifindex; | ||
149 | |||
150 | nh->u.dst.lastuse = jiffies; | ||
151 | nh->u.dst.__use++; | ||
152 | if (result != NULL) | ||
153 | continue; | ||
154 | |||
155 | /* search for the output interface */ | ||
156 | |||
157 | /* this is not SMP safe, only add/remove are | ||
158 | * SMP safe as wrong usecount updates have no big | ||
159 | * impact | ||
160 | */ | ||
161 | devidx = __multipath_finddev(nh_ifidx); | ||
162 | if (devidx == -1) { | ||
163 | /* add the interface to the array | ||
164 | * SMP safe | ||
165 | */ | ||
166 | spin_lock_bh(&state_lock); | ||
167 | |||
168 | /* due to SMP: search again */ | ||
169 | devidx = __multipath_finddev(nh_ifidx); | ||
170 | if (devidx == -1) { | ||
171 | /* add entry for device */ | ||
172 | devidx = __multipath_findslot(); | ||
173 | if (devidx == -1) { | ||
174 | /* unlikely but possible */ | ||
175 | continue; | ||
176 | } | ||
177 | |||
178 | state[devidx].allocated = 1; | ||
179 | state[devidx].ifi = nh_ifidx; | ||
180 | atomic_set(&state[devidx].usecount, 0); | ||
181 | min_usecount = 0; | ||
182 | } | ||
183 | |||
184 | spin_unlock_bh(&state_lock); | ||
185 | } | ||
186 | |||
187 | if (min_usecount == 0) { | ||
188 | /* if the device has not been used it is | ||
189 | * the primary target | ||
190 | */ | ||
191 | drr_safe_inc(&state[devidx].usecount); | ||
192 | result = nh; | ||
193 | } else { | ||
194 | int count = | ||
195 | atomic_read(&state[devidx].usecount); | ||
196 | |||
197 | if (min_usecount == -1 || | ||
198 | count < min_usecount) { | ||
199 | cur_min = nh; | ||
200 | cur_min_devidx = devidx; | ||
201 | min_usecount = count; | ||
202 | } | ||
203 | } | ||
204 | } | ||
205 | } | ||
206 | |||
207 | if (!result) { | ||
208 | if (cur_min) { | ||
209 | drr_safe_inc(&state[cur_min_devidx].usecount); | ||
210 | result = cur_min; | ||
211 | } else { | ||
212 | result = first; | ||
213 | } | ||
214 | } | ||
215 | |||
216 | *rp = result; | ||
217 | } | ||
218 | |||
219 | static struct ip_mp_alg_ops drr_ops = { | ||
220 | .mp_alg_select_route = drr_select_route, | ||
221 | }; | ||
222 | |||
223 | static int __init drr_init(void) | ||
224 | { | ||
225 | int err = register_netdevice_notifier(&drr_dev_notifier); | ||
226 | |||
227 | if (err) | ||
228 | return err; | ||
229 | |||
230 | err = multipath_alg_register(&drr_ops, IP_MP_ALG_DRR); | ||
231 | if (err) | ||
232 | goto fail; | ||
233 | |||
234 | return 0; | ||
235 | |||
236 | fail: | ||
237 | unregister_netdevice_notifier(&drr_dev_notifier); | ||
238 | return err; | ||
239 | } | ||
240 | |||
241 | static void __exit drr_exit(void) | ||
242 | { | ||
243 | unregister_netdevice_notifier(&drr_dev_notifier); | ||
244 | multipath_alg_unregister(&drr_ops, IP_MP_ALG_DRR); | ||
245 | } | ||
246 | |||
247 | module_init(drr_init); | ||
248 | module_exit(drr_exit); | ||
249 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/ipv4/multipath_random.c b/net/ipv4/multipath_random.c
deleted file mode 100644
index c312785d14d0..000000000000
--- a/net/ipv4/multipath_random.c
+++ /dev/null
@@ -1,114 +0,0 @@ | |||
1 | /* | ||
2 | * Random policy for multipath. | ||
3 | * | ||
4 | * | ||
5 | * Version: $Id: multipath_random.c,v 1.1.2.3 2004/09/21 08:42:11 elueck Exp $ | ||
6 | * | ||
7 | * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <asm/system.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/timer.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/fcntl.h> | ||
23 | #include <linux/stat.h> | ||
24 | #include <linux/socket.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/inet.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/inetdevice.h> | ||
29 | #include <linux/igmp.h> | ||
30 | #include <linux/proc_fs.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/mroute.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/random.h> | ||
36 | #include <net/ip.h> | ||
37 | #include <net/protocol.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <net/sock.h> | ||
40 | #include <net/icmp.h> | ||
41 | #include <net/udp.h> | ||
42 | #include <net/raw.h> | ||
43 | #include <linux/notifier.h> | ||
44 | #include <linux/if_arp.h> | ||
45 | #include <linux/netfilter_ipv4.h> | ||
46 | #include <net/ipip.h> | ||
47 | #include <net/checksum.h> | ||
48 | #include <net/ip_mp_alg.h> | ||
49 | |||
50 | #define MULTIPATH_MAX_CANDIDATES 40 | ||
51 | |||
52 | static void random_select_route(const struct flowi *flp, | ||
53 | struct rtable *first, | ||
54 | struct rtable **rp) | ||
55 | { | ||
56 | struct rtable *rt; | ||
57 | struct rtable *decision; | ||
58 | unsigned char candidate_count = 0; | ||
59 | |||
60 | /* count all candidate */ | ||
61 | for (rt = rcu_dereference(first); rt; | ||
62 | rt = rcu_dereference(rt->u.dst.rt_next)) { | ||
63 | if ((rt->u.dst.flags & DST_BALANCED) != 0 && | ||
64 | multipath_comparekeys(&rt->fl, flp)) | ||
65 | ++candidate_count; | ||
66 | } | ||
67 | |||
68 | /* choose a random candidate */ | ||
69 | decision = first; | ||
70 | if (candidate_count > 1) { | ||
71 | unsigned char i = 0; | ||
72 | unsigned char candidate_no = (unsigned char) | ||
73 | (random32() % candidate_count); | ||
74 | |||
75 | /* find chosen candidate and adjust GC data for all candidates | ||
76 | * to ensure they stay in cache | ||
77 | */ | ||
78 | for (rt = first; rt; rt = rt->u.dst.rt_next) { | ||
79 | if ((rt->u.dst.flags & DST_BALANCED) != 0 && | ||
80 | multipath_comparekeys(&rt->fl, flp)) { | ||
81 | rt->u.dst.lastuse = jiffies; | ||
82 | |||
83 | if (i == candidate_no) | ||
84 | decision = rt; | ||
85 | |||
86 | if (i >= candidate_count) | ||
87 | break; | ||
88 | |||
89 | i++; | ||
90 | } | ||
91 | } | ||
92 | } | ||
93 | |||
94 | decision->u.dst.__use++; | ||
95 | *rp = decision; | ||
96 | } | ||
97 | |||
98 | static struct ip_mp_alg_ops random_ops = { | ||
99 | .mp_alg_select_route = random_select_route, | ||
100 | }; | ||
101 | |||
102 | static int __init random_init(void) | ||
103 | { | ||
104 | return multipath_alg_register(&random_ops, IP_MP_ALG_RANDOM); | ||
105 | } | ||
106 | |||
107 | static void __exit random_exit(void) | ||
108 | { | ||
109 | multipath_alg_unregister(&random_ops, IP_MP_ALG_RANDOM); | ||
110 | } | ||
111 | |||
112 | module_init(random_init); | ||
113 | module_exit(random_exit); | ||
114 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c
deleted file mode 100644
index 0ad22524f450..000000000000
--- a/net/ipv4/multipath_rr.c
+++ /dev/null
@@ -1,95 +0,0 @@ | |||
1 | /* | ||
2 | * Round robin policy for multipath. | ||
3 | * | ||
4 | * | ||
5 | * Version: $Id: multipath_rr.c,v 1.1.2.2 2004/09/16 07:42:34 elueck Exp $ | ||
6 | * | ||
7 | * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <asm/system.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/timer.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/fcntl.h> | ||
23 | #include <linux/stat.h> | ||
24 | #include <linux/socket.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/inet.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/inetdevice.h> | ||
29 | #include <linux/igmp.h> | ||
30 | #include <linux/proc_fs.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/mroute.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <net/ip.h> | ||
36 | #include <net/protocol.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | #include <net/sock.h> | ||
39 | #include <net/icmp.h> | ||
40 | #include <net/udp.h> | ||
41 | #include <net/raw.h> | ||
42 | #include <linux/notifier.h> | ||
43 | #include <linux/if_arp.h> | ||
44 | #include <linux/netfilter_ipv4.h> | ||
45 | #include <net/ipip.h> | ||
46 | #include <net/checksum.h> | ||
47 | #include <net/ip_mp_alg.h> | ||
48 | |||
49 | static void rr_select_route(const struct flowi *flp, | ||
50 | struct rtable *first, struct rtable **rp) | ||
51 | { | ||
52 | struct rtable *nh, *result, *min_use_cand = NULL; | ||
53 | int min_use = -1; | ||
54 | |||
55 | /* 1. make sure all alt. nexthops have the same GC related data | ||
56 | * 2. determine the new candidate to be returned | ||
57 | */ | ||
58 | result = NULL; | ||
59 | for (nh = rcu_dereference(first); nh; | ||
60 | nh = rcu_dereference(nh->u.dst.rt_next)) { | ||
61 | if ((nh->u.dst.flags & DST_BALANCED) != 0 && | ||
62 | multipath_comparekeys(&nh->fl, flp)) { | ||
63 | nh->u.dst.lastuse = jiffies; | ||
64 | |||
65 | if (min_use == -1 || nh->u.dst.__use < min_use) { | ||
66 | min_use = nh->u.dst.__use; | ||
67 | min_use_cand = nh; | ||
68 | } | ||
69 | } | ||
70 | } | ||
71 | result = min_use_cand; | ||
72 | if (!result) | ||
73 | result = first; | ||
74 | |||
75 | result->u.dst.__use++; | ||
76 | *rp = result; | ||
77 | } | ||
78 | |||
79 | static struct ip_mp_alg_ops rr_ops = { | ||
80 | .mp_alg_select_route = rr_select_route, | ||
81 | }; | ||
82 | |||
83 | static int __init rr_init(void) | ||
84 | { | ||
85 | return multipath_alg_register(&rr_ops, IP_MP_ALG_RR); | ||
86 | } | ||
87 | |||
88 | static void __exit rr_exit(void) | ||
89 | { | ||
90 | multipath_alg_unregister(&rr_ops, IP_MP_ALG_RR); | ||
91 | } | ||
92 | |||
93 | module_init(rr_init); | ||
94 | module_exit(rr_exit); | ||
95 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
deleted file mode 100644
index 57c503694539..000000000000
--- a/net/ipv4/multipath_wrandom.c
+++ /dev/null
@@ -1,329 +0,0 @@ | |||
1 | /* | ||
2 | * Weighted random policy for multipath. | ||
3 | * | ||
4 | * | ||
5 | * Version: $Id: multipath_wrandom.c,v 1.1.2.3 2004/09/22 07:51:40 elueck Exp $ | ||
6 | * | ||
7 | * Authors: Einar Lueck <elueck@de.ibm.com><lkml@einar-lueck.de> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <asm/system.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/timer.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/fcntl.h> | ||
23 | #include <linux/stat.h> | ||
24 | #include <linux/socket.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/inet.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/inetdevice.h> | ||
29 | #include <linux/igmp.h> | ||
30 | #include <linux/proc_fs.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/mroute.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/random.h> | ||
36 | #include <net/ip.h> | ||
37 | #include <net/protocol.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <net/sock.h> | ||
40 | #include <net/icmp.h> | ||
41 | #include <net/udp.h> | ||
42 | #include <net/raw.h> | ||
43 | #include <linux/notifier.h> | ||
44 | #include <linux/if_arp.h> | ||
45 | #include <linux/netfilter_ipv4.h> | ||
46 | #include <net/ipip.h> | ||
47 | #include <net/checksum.h> | ||
48 | #include <net/ip_fib.h> | ||
49 | #include <net/ip_mp_alg.h> | ||
50 | |||
51 | #define MULTIPATH_STATE_SIZE 15 | ||
52 | |||
53 | struct multipath_candidate { | ||
54 | struct multipath_candidate *next; | ||
55 | int power; | ||
56 | struct rtable *rt; | ||
57 | }; | ||
58 | |||
59 | struct multipath_dest { | ||
60 | struct list_head list; | ||
61 | |||
62 | const struct fib_nh *nh_info; | ||
63 | __be32 netmask; | ||
64 | __be32 network; | ||
65 | unsigned char prefixlen; | ||
66 | |||
67 | struct rcu_head rcu; | ||
68 | }; | ||
69 | |||
70 | struct multipath_bucket { | ||
71 | struct list_head head; | ||
72 | spinlock_t lock; | ||
73 | }; | ||
74 | |||
75 | struct multipath_route { | ||
76 | struct list_head list; | ||
77 | |||
78 | int oif; | ||
79 | __be32 gw; | ||
80 | struct list_head dests; | ||
81 | |||
82 | struct rcu_head rcu; | ||
83 | }; | ||
84 | |||
85 | /* state: primarily weight per route information */ | ||
86 | static struct multipath_bucket state[MULTIPATH_STATE_SIZE]; | ||
87 | |||
88 | static unsigned char __multipath_lookup_weight(const struct flowi *fl, | ||
89 | const struct rtable *rt) | ||
90 | { | ||
91 | const int state_idx = rt->idev->dev->ifindex % MULTIPATH_STATE_SIZE; | ||
92 | struct multipath_route *r; | ||
93 | struct multipath_route *target_route = NULL; | ||
94 | struct multipath_dest *d; | ||
95 | int weight = 1; | ||
96 | |||
97 | /* lookup the weight information for a certain route */ | ||
98 | rcu_read_lock(); | ||
99 | |||
100 | /* find state entry for gateway or add one if necessary */ | ||
101 | list_for_each_entry_rcu(r, &state[state_idx].head, list) { | ||
102 | if (r->gw == rt->rt_gateway && | ||
103 | r->oif == rt->idev->dev->ifindex) { | ||
104 | target_route = r; | ||
105 | break; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | if (!target_route) { | ||
110 | /* this should not happen... but we are prepared */ | ||
111 | printk( KERN_CRIT"%s: missing state for gateway: %u and " \ | ||
112 | "device %d\n", __FUNCTION__, rt->rt_gateway, | ||
113 | rt->idev->dev->ifindex); | ||
114 | goto out; | ||
115 | } | ||
116 | |||
117 | /* find state entry for destination */ | ||
118 | list_for_each_entry_rcu(d, &target_route->dests, list) { | ||
119 | __be32 targetnetwork = fl->fl4_dst & | ||
120 | inet_make_mask(d->prefixlen); | ||
121 | |||
122 | if ((targetnetwork & d->netmask) == d->network) { | ||
123 | weight = d->nh_info->nh_weight; | ||
124 | goto out; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | out: | ||
129 | rcu_read_unlock(); | ||
130 | return weight; | ||
131 | } | ||
132 | |||
133 | static void wrandom_init_state(void) | ||
134 | { | ||
135 | int i; | ||
136 | |||
137 | for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) { | ||
138 | INIT_LIST_HEAD(&state[i].head); | ||
139 | spin_lock_init(&state[i].lock); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | static void wrandom_select_route(const struct flowi *flp, | ||
144 | struct rtable *first, | ||
145 | struct rtable **rp) | ||
146 | { | ||
147 | struct rtable *rt; | ||
148 | struct rtable *decision; | ||
149 | struct multipath_candidate *first_mpc = NULL; | ||
150 | struct multipath_candidate *mpc, *last_mpc = NULL; | ||
151 | int power = 0; | ||
152 | int last_power; | ||
153 | int selector; | ||
154 | const size_t size_mpc = sizeof(struct multipath_candidate); | ||
155 | |||
156 | /* collect all candidates and identify their weights */ | ||
157 | for (rt = rcu_dereference(first); rt; | ||
158 | rt = rcu_dereference(rt->u.dst.rt_next)) { | ||
159 | if ((rt->u.dst.flags & DST_BALANCED) != 0 && | ||
160 | multipath_comparekeys(&rt->fl, flp)) { | ||
161 | struct multipath_candidate* mpc = | ||
162 | (struct multipath_candidate*) | ||
163 | kmalloc(size_mpc, GFP_ATOMIC); | ||
164 | |||
165 | if (!mpc) | ||
166 | return; | ||
167 | |||
168 | power += __multipath_lookup_weight(flp, rt) * 10000; | ||
169 | |||
170 | mpc->power = power; | ||
171 | mpc->rt = rt; | ||
172 | mpc->next = NULL; | ||
173 | |||
174 | if (!first_mpc) | ||
175 | first_mpc = mpc; | ||
176 | else | ||
177 | last_mpc->next = mpc; | ||
178 | |||
179 | last_mpc = mpc; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | /* choose a weighted random candidate */ | ||
184 | decision = first; | ||
185 | selector = random32() % power; | ||
186 | last_power = 0; | ||
187 | |||
188 | /* select candidate, adjust GC data and cleanup local state */ | ||
189 | decision = first; | ||
190 | last_mpc = NULL; | ||
191 | for (mpc = first_mpc; mpc; mpc = mpc->next) { | ||
192 | mpc->rt->u.dst.lastuse = jiffies; | ||
193 | if (last_power <= selector && selector < mpc->power) | ||
194 | decision = mpc->rt; | ||
195 | |||
196 | last_power = mpc->power; | ||
197 | kfree(last_mpc); | ||
198 | last_mpc = mpc; | ||
199 | } | ||
200 | |||
201 | /* concurrent __multipath_flush may lead to !last_mpc */ | ||
202 | kfree(last_mpc); | ||
203 | |||
204 | decision->u.dst.__use++; | ||
205 | *rp = decision; | ||
206 | } | ||
207 | |||
208 | static void wrandom_set_nhinfo(__be32 network, | ||
209 | __be32 netmask, | ||
210 | unsigned char prefixlen, | ||
211 | const struct fib_nh *nh) | ||
212 | { | ||
213 | const int state_idx = nh->nh_oif % MULTIPATH_STATE_SIZE; | ||
214 | struct multipath_route *r, *target_route = NULL; | ||
215 | struct multipath_dest *d, *target_dest = NULL; | ||
216 | |||
217 | /* store the weight information for a certain route */ | ||
218 | spin_lock_bh(&state[state_idx].lock); | ||
219 | |||
220 | /* find state entry for gateway or add one if necessary */ | ||
221 | list_for_each_entry_rcu(r, &state[state_idx].head, list) { | ||
222 | if (r->gw == nh->nh_gw && r->oif == nh->nh_oif) { | ||
223 | target_route = r; | ||
224 | break; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | if (!target_route) { | ||
229 | const size_t size_rt = sizeof(struct multipath_route); | ||
230 | target_route = (struct multipath_route *) | ||
231 | kmalloc(size_rt, GFP_ATOMIC); | ||
232 | |||
233 | target_route->gw = nh->nh_gw; | ||
234 | target_route->oif = nh->nh_oif; | ||
235 | memset(&target_route->rcu, 0, sizeof(struct rcu_head)); | ||
236 | INIT_LIST_HEAD(&target_route->dests); | ||
237 | |||
238 | list_add_rcu(&target_route->list, &state[state_idx].head); | ||
239 | } | ||
240 | |||
241 | /* find state entry for destination or add one if necessary */ | ||
242 | list_for_each_entry_rcu(d, &target_route->dests, list) { | ||
243 | if (d->nh_info == nh) { | ||
244 | target_dest = d; | ||
245 | break; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | if (!target_dest) { | ||
250 | const size_t size_dst = sizeof(struct multipath_dest); | ||
251 | target_dest = (struct multipath_dest*) | ||
252 | kmalloc(size_dst, GFP_ATOMIC); | ||
253 | |||
254 | target_dest->nh_info = nh; | ||
255 | target_dest->network = network; | ||
256 | target_dest->netmask = netmask; | ||
257 | target_dest->prefixlen = prefixlen; | ||
258 | memset(&target_dest->rcu, 0, sizeof(struct rcu_head)); | ||
259 | |||
260 | list_add_rcu(&target_dest->list, &target_route->dests); | ||
261 | } | ||
262 | /* else: we already stored this info for another destination => | ||
263 | * we are finished | ||
264 | */ | ||
265 | |||
266 | spin_unlock_bh(&state[state_idx].lock); | ||
267 | } | ||
268 | |||
269 | static void __multipath_free(struct rcu_head *head) | ||
270 | { | ||
271 | struct multipath_route *rt = container_of(head, struct multipath_route, | ||
272 | rcu); | ||
273 | kfree(rt); | ||
274 | } | ||
275 | |||
276 | static void __multipath_free_dst(struct rcu_head *head) | ||
277 | { | ||
278 | struct multipath_dest *dst = container_of(head, | ||
279 | struct multipath_dest, | ||
280 | rcu); | ||
281 | kfree(dst); | ||
282 | } | ||
283 | |||
284 | static void wrandom_flush(void) | ||
285 | { | ||
286 | int i; | ||
287 | |||
288 | /* defere delete to all entries */ | ||
289 | for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) { | ||
290 | struct multipath_route *r; | ||
291 | |||
292 | spin_lock_bh(&state[i].lock); | ||
293 | list_for_each_entry_rcu(r, &state[i].head, list) { | ||
294 | struct multipath_dest *d; | ||
295 | list_for_each_entry_rcu(d, &r->dests, list) { | ||
296 | list_del_rcu(&d->list); | ||
297 | call_rcu(&d->rcu, | ||
298 | __multipath_free_dst); | ||
299 | } | ||
300 | list_del_rcu(&r->list); | ||
301 | call_rcu(&r->rcu, | ||
302 | __multipath_free); | ||
303 | } | ||
304 | |||
305 | spin_unlock_bh(&state[i].lock); | ||
306 | } | ||
307 | } | ||
308 | |||
309 | static struct ip_mp_alg_ops wrandom_ops = { | ||
310 | .mp_alg_select_route = wrandom_select_route, | ||
311 | .mp_alg_flush = wrandom_flush, | ||
312 | .mp_alg_set_nhinfo = wrandom_set_nhinfo, | ||
313 | }; | ||
314 | |||
315 | static int __init wrandom_init(void) | ||
316 | { | ||
317 | wrandom_init_state(); | ||
318 | |||
319 | return multipath_alg_register(&wrandom_ops, IP_MP_ALG_WRANDOM); | ||
320 | } | ||
321 | |||
322 | static void __exit wrandom_exit(void) | ||
323 | { | ||
324 | multipath_alg_unregister(&wrandom_ops, IP_MP_ALG_WRANDOM); | ||
325 | } | ||
326 | |||
327 | module_init(wrandom_init); | ||
328 | module_exit(wrandom_exit); | ||
329 | MODULE_LICENSE("GPL"); | ||
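For readers skimming the deleted wrandom code: wrandom_select_route() accumulates each candidate's weight into a running total, draws selector = random32() % power, and picks the candidate whose cumulative interval contains the draw, so heavier-weighted nexthops win proportionally more often. A standalone model of that selection step (small fixed-size array, illustration only):

```c
#include <stdio.h>
#include <stdlib.h>

/* Weighted pick via cumulative weights, as in the deleted selection loop.
 * Assumes n <= 16 for this sketch. */
static int weighted_pick(const int *weight, int n)
{
	int cum[16], total = 0, i, selector;

	for (i = 0; i < n; i++) {
		total += weight[i];
		cum[i] = total;		/* upper edge of candidate i's interval */
	}

	selector = rand() % total;
	for (i = 0; i < n; i++)
		if (selector < cum[i])
			return i;
	return n - 1;			/* not reached */
}

int main(void)
{
	int w[] = { 1, 3, 6 };		/* candidate 2 should win ~60% of draws */
	int hits[3] = { 0 }, i;

	for (i = 0; i < 10000; i++)
		hits[weighted_pick(w, 3)]++;
	printf("%d %d %d\n", hits[0], hits[1], hits[2]);
	return 0;
}
```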
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 46509fae9fd8..fa97947c6ae1 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -230,7 +230,7 @@ config IP_NF_TARGET_NETMAP | |||
230 | To compile it as a module, choose M here. If unsure, say N. | 230 | To compile it as a module, choose M here. If unsure, say N. |
231 | 231 | ||
232 | config IP_NF_TARGET_SAME | 232 | config IP_NF_TARGET_SAME |
233 | tristate "SAME target support" | 233 | tristate "SAME target support (OBSOLETE)" |
234 | depends on NF_NAT | 234 | depends on NF_NAT |
235 | help | 235 | help |
236 | This option adds a `SAME' target, which works like the standard SNAT | 236 | This option adds a `SAME' target, which works like the standard SNAT |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index cae41215e3c7..e981232942a1 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -224,7 +224,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb, | |||
224 | static const char nulldevname[IFNAMSIZ]; | 224 | static const char nulldevname[IFNAMSIZ]; |
225 | unsigned int verdict = NF_DROP; | 225 | unsigned int verdict = NF_DROP; |
226 | struct arphdr *arp; | 226 | struct arphdr *arp; |
227 | int hotdrop = 0; | 227 | bool hotdrop = false; |
228 | struct arpt_entry *e, *back; | 228 | struct arpt_entry *e, *back; |
229 | const char *indev, *outdev; | 229 | const char *indev, *outdev; |
230 | void *table_base; | 230 | void *table_base; |
@@ -1140,13 +1140,13 @@ void arpt_unregister_table(struct arpt_table *table) | |||
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | /* The built-in targets: standard (NULL) and error. */ | 1142 | /* The built-in targets: standard (NULL) and error. */ |
1143 | static struct arpt_target arpt_standard_target = { | 1143 | static struct arpt_target arpt_standard_target __read_mostly = { |
1144 | .name = ARPT_STANDARD_TARGET, | 1144 | .name = ARPT_STANDARD_TARGET, |
1145 | .targetsize = sizeof(int), | 1145 | .targetsize = sizeof(int), |
1146 | .family = NF_ARP, | 1146 | .family = NF_ARP, |
1147 | }; | 1147 | }; |
1148 | 1148 | ||
1149 | static struct arpt_target arpt_error_target = { | 1149 | static struct arpt_target arpt_error_target __read_mostly = { |
1150 | .name = ARPT_ERROR_TARGET, | 1150 | .name = ARPT_ERROR_TARGET, |
1151 | .target = arpt_error, | 1151 | .target = arpt_error, |
1152 | .targetsize = ARPT_FUNCTION_MAXNAMELEN, | 1152 | .targetsize = ARPT_FUNCTION_MAXNAMELEN, |
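The arpt_standard_target/arpt_error_target annotations above (and arpt_mangle_reg below) use __read_mostly, which places an object in a data section reserved for rarely written variables so it does not share cache lines with hot, frequently modified data. Usage is just an attribute on the definition; the variable name here is made up for illustration:

```c
#include <linux/cache.h>

/* Written once at init, read on every packet: keep it away from
 * frequently-modified data to avoid false sharing. */
static unsigned int example_lookup_budget __read_mostly = 64;
```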
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index 6298d404e7c7..c4bdab47597f 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -65,7 +65,7 @@ target(struct sk_buff **pskb, | |||
65 | return mangle->target; | 65 | return mangle->target; |
66 | } | 66 | } |
67 | 67 | ||
68 | static int | 68 | static bool |
69 | checkentry(const char *tablename, const void *e, const struct xt_target *target, | 69 | checkentry(const char *tablename, const void *e, const struct xt_target *target, |
70 | void *targinfo, unsigned int hook_mask) | 70 | void *targinfo, unsigned int hook_mask) |
71 | { | 71 | { |
@@ -73,15 +73,15 @@ checkentry(const char *tablename, const void *e, const struct xt_target *target, | |||
73 | 73 | ||
74 | if (mangle->flags & ~ARPT_MANGLE_MASK || | 74 | if (mangle->flags & ~ARPT_MANGLE_MASK || |
75 | !(mangle->flags & ARPT_MANGLE_MASK)) | 75 | !(mangle->flags & ARPT_MANGLE_MASK)) |
76 | return 0; | 76 | return false; |
77 | 77 | ||
78 | if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && | 78 | if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && |
79 | mangle->target != ARPT_CONTINUE) | 79 | mangle->target != ARPT_CONTINUE) |
80 | return 0; | 80 | return false; |
81 | return 1; | 81 | return true; |
82 | } | 82 | } |
83 | 83 | ||
84 | static struct arpt_target arpt_mangle_reg = { | 84 | static struct arpt_target arpt_mangle_reg __read_mostly = { |
85 | .name = "mangle", | 85 | .name = "mangle", |
86 | .target = target, | 86 | .target = target, |
87 | .targetsize = sizeof(struct arpt_mangle), | 87 | .targetsize = sizeof(struct arpt_mangle), |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 9bacf1a03630..e1b402c6b855 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -152,20 +152,20 @@ ip_packet_match(const struct iphdr *ip, | |||
152 | return 1; | 152 | return 1; |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline int | 155 | static inline bool |
156 | ip_checkentry(const struct ipt_ip *ip) | 156 | ip_checkentry(const struct ipt_ip *ip) |
157 | { | 157 | { |
158 | if (ip->flags & ~IPT_F_MASK) { | 158 | if (ip->flags & ~IPT_F_MASK) { |
159 | duprintf("Unknown flag bits set: %08X\n", | 159 | duprintf("Unknown flag bits set: %08X\n", |
160 | ip->flags & ~IPT_F_MASK); | 160 | ip->flags & ~IPT_F_MASK); |
161 | return 0; | 161 | return false; |
162 | } | 162 | } |
163 | if (ip->invflags & ~IPT_INV_MASK) { | 163 | if (ip->invflags & ~IPT_INV_MASK) { |
164 | duprintf("Unknown invflag bits set: %08X\n", | 164 | duprintf("Unknown invflag bits set: %08X\n", |
165 | ip->invflags & ~IPT_INV_MASK); | 165 | ip->invflags & ~IPT_INV_MASK); |
166 | return 0; | 166 | return false; |
167 | } | 167 | } |
168 | return 1; | 168 | return true; |
169 | } | 169 | } |
170 | 170 | ||
171 | static unsigned int | 171 | static unsigned int |
@@ -183,19 +183,19 @@ ipt_error(struct sk_buff **pskb, | |||
183 | } | 183 | } |
184 | 184 | ||
185 | static inline | 185 | static inline |
186 | int do_match(struct ipt_entry_match *m, | 186 | bool do_match(struct ipt_entry_match *m, |
187 | const struct sk_buff *skb, | 187 | const struct sk_buff *skb, |
188 | const struct net_device *in, | 188 | const struct net_device *in, |
189 | const struct net_device *out, | 189 | const struct net_device *out, |
190 | int offset, | 190 | int offset, |
191 | int *hotdrop) | 191 | bool *hotdrop) |
192 | { | 192 | { |
193 | /* Stop iteration if it doesn't match */ | 193 | /* Stop iteration if it doesn't match */ |
194 | if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data, | 194 | if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data, |
195 | offset, ip_hdrlen(skb), hotdrop)) | 195 | offset, ip_hdrlen(skb), hotdrop)) |
196 | return 1; | 196 | return true; |
197 | else | 197 | else |
198 | return 0; | 198 | return false; |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline struct ipt_entry * | 201 | static inline struct ipt_entry * |
@@ -204,6 +204,112 @@ get_entry(void *base, unsigned int offset) | |||
204 | return (struct ipt_entry *)(base + offset); | 204 | return (struct ipt_entry *)(base + offset); |
205 | } | 205 | } |
206 | 206 | ||
207 | /* All zeroes == unconditional rule. */ | ||
208 | static inline int | ||
209 | unconditional(const struct ipt_ip *ip) | ||
210 | { | ||
211 | unsigned int i; | ||
212 | |||
213 | for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++) | ||
214 | if (((__u32 *)ip)[i]) | ||
215 | return 0; | ||
216 | |||
217 | return 1; | ||
218 | } | ||
219 | |||
220 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | ||
221 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | ||
222 | static const char *hooknames[] = { | ||
223 | [NF_IP_PRE_ROUTING] = "PREROUTING", | ||
224 | [NF_IP_LOCAL_IN] = "INPUT", | ||
225 | [NF_IP_FORWARD] = "FORWARD", | ||
226 | [NF_IP_LOCAL_OUT] = "OUTPUT", | ||
227 | [NF_IP_POST_ROUTING] = "POSTROUTING", | ||
228 | }; | ||
229 | |||
230 | enum nf_ip_trace_comments { | ||
231 | NF_IP_TRACE_COMMENT_RULE, | ||
232 | NF_IP_TRACE_COMMENT_RETURN, | ||
233 | NF_IP_TRACE_COMMENT_POLICY, | ||
234 | }; | ||
235 | |||
236 | static const char *comments[] = { | ||
237 | [NF_IP_TRACE_COMMENT_RULE] = "rule", | ||
238 | [NF_IP_TRACE_COMMENT_RETURN] = "return", | ||
239 | [NF_IP_TRACE_COMMENT_POLICY] = "policy", | ||
240 | }; | ||
241 | |||
242 | static struct nf_loginfo trace_loginfo = { | ||
243 | .type = NF_LOG_TYPE_LOG, | ||
244 | .u = { | ||
245 | .log = { | ||
246 | .level = 4, | ||
247 | .logflags = NF_LOG_MASK, | ||
248 | }, | ||
249 | }, | ||
250 | }; | ||
251 | |||
252 | static inline int | ||
253 | get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, | ||
254 | char *hookname, char **chainname, | ||
255 | char **comment, unsigned int *rulenum) | ||
256 | { | ||
257 | struct ipt_standard_target *t = (void *)ipt_get_target(s); | ||
258 | |||
259 | if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) { | ||
260 | /* Head of user chain: ERROR target with chainname */ | ||
261 | *chainname = t->target.data; | ||
262 | (*rulenum) = 0; | ||
263 | } else if (s == e) { | ||
264 | (*rulenum)++; | ||
265 | |||
266 | if (s->target_offset == sizeof(struct ipt_entry) | ||
267 | && strcmp(t->target.u.kernel.target->name, | ||
268 | IPT_STANDARD_TARGET) == 0 | ||
269 | && t->verdict < 0 | ||
270 | && unconditional(&s->ip)) { | ||
271 | /* Tail of chains: STANDARD target (return/policy) */ | ||
272 | *comment = *chainname == hookname | ||
273 | ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY] | ||
274 | : (char *)comments[NF_IP_TRACE_COMMENT_RETURN]; | ||
275 | } | ||
276 | return 1; | ||
277 | } else | ||
278 | (*rulenum)++; | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static void trace_packet(struct sk_buff *skb, | ||
284 | unsigned int hook, | ||
285 | const struct net_device *in, | ||
286 | const struct net_device *out, | ||
287 | char *tablename, | ||
288 | struct xt_table_info *private, | ||
289 | struct ipt_entry *e) | ||
290 | { | ||
291 | void *table_base; | ||
292 | struct ipt_entry *root; | ||
293 | char *hookname, *chainname, *comment; | ||
294 | unsigned int rulenum = 0; | ||
295 | |||
296 | table_base = (void *)private->entries[smp_processor_id()]; | ||
297 | root = get_entry(table_base, private->hook_entry[hook]); | ||
298 | |||
299 | hookname = chainname = (char *)hooknames[hook]; | ||
300 | comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE]; | ||
301 | |||
302 | IPT_ENTRY_ITERATE(root, | ||
303 | private->size - private->hook_entry[hook], | ||
304 | get_chainname_rulenum, | ||
305 | e, hookname, &chainname, &comment, &rulenum); | ||
306 | |||
307 | nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, | ||
308 | "TRACE: %s:%s:%s:%u ", | ||
309 | tablename, chainname, comment, rulenum); | ||
310 | } | ||
311 | #endif | ||
312 | |||
207 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ | 313 | /* Returns one of the generic firewall policies, like NF_ACCEPT. */ |
208 | unsigned int | 314 | unsigned int |
209 | ipt_do_table(struct sk_buff **pskb, | 315 | ipt_do_table(struct sk_buff **pskb, |
@@ -216,7 +322,7 @@ ipt_do_table(struct sk_buff **pskb, | |||
216 | u_int16_t offset; | 322 | u_int16_t offset; |
217 | struct iphdr *ip; | 323 | struct iphdr *ip; |
218 | u_int16_t datalen; | 324 | u_int16_t datalen; |
219 | int hotdrop = 0; | 325 | bool hotdrop = false; |
220 | /* Initializing verdict to NF_DROP keeps gcc happy. */ | 326 | /* Initializing verdict to NF_DROP keeps gcc happy. */ |
221 | unsigned int verdict = NF_DROP; | 327 | unsigned int verdict = NF_DROP; |
222 | const char *indev, *outdev; | 328 | const char *indev, *outdev; |
@@ -261,6 +367,14 @@ ipt_do_table(struct sk_buff **pskb, | |||
261 | 367 | ||
262 | t = ipt_get_target(e); | 368 | t = ipt_get_target(e); |
263 | IP_NF_ASSERT(t->u.kernel.target); | 369 | IP_NF_ASSERT(t->u.kernel.target); |
370 | |||
371 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | ||
372 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | ||
373 | /* The packet is traced: log it */ | ||
374 | if (unlikely((*pskb)->nf_trace)) | ||
375 | trace_packet(*pskb, hook, in, out, | ||
376 | table->name, private, e); | ||
377 | #endif | ||
264 | /* Standard target? */ | 378 | /* Standard target? */ |
265 | if (!t->u.kernel.target->target) { | 379 | if (!t->u.kernel.target->target) { |
266 | int v; | 380 | int v; |
@@ -341,19 +455,6 @@ ipt_do_table(struct sk_buff **pskb, | |||
341 | #endif | 455 | #endif |
342 | } | 456 | } |
343 | 457 | ||
344 | /* All zeroes == unconditional rule. */ | ||
345 | static inline int | ||
346 | unconditional(const struct ipt_ip *ip) | ||
347 | { | ||
348 | unsigned int i; | ||
349 | |||
350 | for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++) | ||
351 | if (((__u32 *)ip)[i]) | ||
352 | return 0; | ||
353 | |||
354 | return 1; | ||
355 | } | ||
356 | |||
357 | /* Figures out from what hook each rule can be called: returns 0 if | 458 | /* Figures out from what hook each rule can be called: returns 0 if |
358 | there are loops. Puts hook bitmask in comefrom. */ | 459 | there are loops. Puts hook bitmask in comefrom. */ |
359 | static int | 460 | static int |
@@ -2105,16 +2206,16 @@ void ipt_unregister_table(struct xt_table *table) | |||
2105 | } | 2206 | } |
2106 | 2207 | ||
2107 | /* Returns 1 if the type and code is matched by the range, 0 otherwise */ | 2208 | /* Returns 1 if the type and code is matched by the range, 0 otherwise */ |
2108 | static inline int | 2209 | static inline bool |
2109 | icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, | 2210 | icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, |
2110 | u_int8_t type, u_int8_t code, | 2211 | u_int8_t type, u_int8_t code, |
2111 | int invert) | 2212 | bool invert) |
2112 | { | 2213 | { |
2113 | return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code)) | 2214 | return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code)) |
2114 | ^ invert; | 2215 | ^ invert; |
2115 | } | 2216 | } |
2116 | 2217 | ||
2117 | static int | 2218 | static bool |
2118 | icmp_match(const struct sk_buff *skb, | 2219 | icmp_match(const struct sk_buff *skb, |
2119 | const struct net_device *in, | 2220 | const struct net_device *in, |
2120 | const struct net_device *out, | 2221 | const struct net_device *out, |
@@ -2122,14 +2223,14 @@ icmp_match(const struct sk_buff *skb, | |||
2122 | const void *matchinfo, | 2223 | const void *matchinfo, |
2123 | int offset, | 2224 | int offset, |
2124 | unsigned int protoff, | 2225 | unsigned int protoff, |
2125 | int *hotdrop) | 2226 | bool *hotdrop) |
2126 | { | 2227 | { |
2127 | struct icmphdr _icmph, *ic; | 2228 | struct icmphdr _icmph, *ic; |
2128 | const struct ipt_icmp *icmpinfo = matchinfo; | 2229 | const struct ipt_icmp *icmpinfo = matchinfo; |
2129 | 2230 | ||
2130 | /* Must not be a fragment. */ | 2231 | /* Must not be a fragment. */ |
2131 | if (offset) | 2232 | if (offset) |
2132 | return 0; | 2233 | return false; |
2133 | 2234 | ||
2134 | ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph); | 2235 | ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph); |
2135 | if (ic == NULL) { | 2236 | if (ic == NULL) { |
@@ -2137,8 +2238,8 @@ icmp_match(const struct sk_buff *skb, | |||
2137 | * can't. Hence, no choice but to drop. | 2238 | * can't. Hence, no choice but to drop. |
2138 | */ | 2239 | */ |
2139 | duprintf("Dropping evil ICMP tinygram.\n"); | 2240 | duprintf("Dropping evil ICMP tinygram.\n"); |
2140 | *hotdrop = 1; | 2241 | *hotdrop = true; |
2141 | return 0; | 2242 | return false; |
2142 | } | 2243 | } |
2143 | 2244 | ||
2144 | return icmp_type_code_match(icmpinfo->type, | 2245 | return icmp_type_code_match(icmpinfo->type, |
@@ -2149,7 +2250,7 @@ icmp_match(const struct sk_buff *skb, | |||
2149 | } | 2250 | } |
2150 | 2251 | ||
2151 | /* Called when user tries to insert an entry of this type. */ | 2252 | /* Called when user tries to insert an entry of this type. */ |
2152 | static int | 2253 | static bool |
2153 | icmp_checkentry(const char *tablename, | 2254 | icmp_checkentry(const char *tablename, |
2154 | const void *info, | 2255 | const void *info, |
2155 | const struct xt_match *match, | 2256 | const struct xt_match *match, |
@@ -2163,7 +2264,7 @@ icmp_checkentry(const char *tablename, | |||
2163 | } | 2264 | } |
2164 | 2265 | ||
2165 | /* The built-in targets: standard (NULL) and error. */ | 2266 | /* The built-in targets: standard (NULL) and error. */ |
2166 | static struct xt_target ipt_standard_target = { | 2267 | static struct xt_target ipt_standard_target __read_mostly = { |
2167 | .name = IPT_STANDARD_TARGET, | 2268 | .name = IPT_STANDARD_TARGET, |
2168 | .targetsize = sizeof(int), | 2269 | .targetsize = sizeof(int), |
2169 | .family = AF_INET, | 2270 | .family = AF_INET, |
@@ -2174,7 +2275,7 @@ static struct xt_target ipt_standard_target = { | |||
2174 | #endif | 2275 | #endif |
2175 | }; | 2276 | }; |
2176 | 2277 | ||
2177 | static struct xt_target ipt_error_target = { | 2278 | static struct xt_target ipt_error_target __read_mostly = { |
2178 | .name = IPT_ERROR_TARGET, | 2279 | .name = IPT_ERROR_TARGET, |
2179 | .target = ipt_error, | 2280 | .target = ipt_error, |
2180 | .targetsize = IPT_FUNCTION_MAXNAMELEN, | 2281 | .targetsize = IPT_FUNCTION_MAXNAMELEN, |
@@ -2197,7 +2298,7 @@ static struct nf_sockopt_ops ipt_sockopts = { | |||
2197 | #endif | 2298 | #endif |
2198 | }; | 2299 | }; |
2199 | 2300 | ||
2200 | static struct xt_match icmp_matchstruct = { | 2301 | static struct xt_match icmp_matchstruct __read_mostly = { |
2201 | .name = "icmp", | 2302 | .name = "icmp", |
2202 | .match = icmp_match, | 2303 | .match = icmp_match, |
2203 | .matchsize = sizeof(struct ipt_icmp), | 2304 | .matchsize = sizeof(struct ipt_icmp), |
@@ -2230,7 +2331,7 @@ static int __init ip_tables_init(void) | |||
2230 | if (ret < 0) | 2331 | if (ret < 0) |
2231 | goto err5; | 2332 | goto err5; |
2232 | 2333 | ||
2233 | printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n"); | 2334 | printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n"); |
2234 | return 0; | 2335 | return 0; |
2235 | 2336 | ||
2236 | err5: | 2337 | err5: |
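The ip_tables.c hunks above carry two themes that repeat through the rest of this series: traversal can now emit a "TRACE: table:chain:comment:rulenum" log line for packets marked with nf_trace, and the match/hotdrop plumbing moves from int to bool. A minimal sketch of the new match shape, using a hypothetical example_match with a simplified argument list (the real xt_match prototype takes more parameters than shown here):

    #include <linux/types.h>
    #include <linux/skbuff.h>
    #include <linux/icmp.h>

    /*
     * Hypothetical match written against the new convention: return true
     * on match, false otherwise, and set *hotdrop when the packet should
     * be dropped outright (e.g. a header too short to examine).
     */
    static bool example_match(const struct sk_buff *skb, unsigned int protoff,
                              bool *hotdrop)
    {
            struct icmphdr _icmph;
            const struct icmphdr *ic;

            ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
            if (ic == NULL) {
                    *hotdrop = true;        /* truncated header: drop hard */
                    return false;
            }
            return ic->type == ICMP_ECHO;   /* placeholder predicate */
    }

Returning false with *hotdrop set means "drop this packet now"; false alone means the rule simply did not match; true lets evaluation continue with the rule's remaining matches and target.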
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 40e273421398..dcc12b183474 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -30,14 +30,6 @@ | |||
30 | 30 | ||
31 | #define CLUSTERIP_VERSION "0.8" | 31 | #define CLUSTERIP_VERSION "0.8" |
32 | 32 | ||
33 | #define DEBUG_CLUSTERIP | ||
34 | |||
35 | #ifdef DEBUG_CLUSTERIP | ||
36 | #define DEBUGP printk | ||
37 | #else | ||
38 | #define DEBUGP | ||
39 | #endif | ||
40 | |||
41 | MODULE_LICENSE("GPL"); | 33 | MODULE_LICENSE("GPL"); |
42 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 34 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
43 | MODULE_DESCRIPTION("iptables target for CLUSTERIP"); | 35 | MODULE_DESCRIPTION("iptables target for CLUSTERIP"); |
@@ -122,9 +114,8 @@ __clusterip_config_find(__be32 clusterip) | |||
122 | list_for_each(pos, &clusterip_configs) { | 114 | list_for_each(pos, &clusterip_configs) { |
123 | struct clusterip_config *c = list_entry(pos, | 115 | struct clusterip_config *c = list_entry(pos, |
124 | struct clusterip_config, list); | 116 | struct clusterip_config, list); |
125 | if (c->clusterip == clusterip) { | 117 | if (c->clusterip == clusterip) |
126 | return c; | 118 | return c; |
127 | } | ||
128 | } | 119 | } |
129 | 120 | ||
130 | return NULL; | 121 | return NULL; |
@@ -155,9 +146,8 @@ clusterip_config_init_nodelist(struct clusterip_config *c, | |||
155 | { | 146 | { |
156 | int n; | 147 | int n; |
157 | 148 | ||
158 | for (n = 0; n < i->num_local_nodes; n++) { | 149 | for (n = 0; n < i->num_local_nodes; n++) |
159 | set_bit(i->local_nodes[n] - 1, &c->local_nodes); | 150 | set_bit(i->local_nodes[n] - 1, &c->local_nodes); |
160 | } | ||
161 | } | 151 | } |
162 | 152 | ||
163 | static struct clusterip_config * | 153 | static struct clusterip_config * |
@@ -220,27 +210,28 @@ clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum) | |||
220 | return 0; | 210 | return 0; |
221 | } | 211 | } |
222 | 212 | ||
223 | static int | 213 | static bool |
224 | clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) | 214 | clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) |
225 | { | 215 | { |
226 | if (nodenum == 0 || | 216 | if (nodenum == 0 || |
227 | nodenum > c->num_total_nodes) | 217 | nodenum > c->num_total_nodes) |
228 | return 1; | 218 | return true; |
229 | 219 | ||
230 | if (test_and_clear_bit(nodenum - 1, &c->local_nodes)) | 220 | if (test_and_clear_bit(nodenum - 1, &c->local_nodes)) |
231 | return 0; | 221 | return false; |
232 | 222 | ||
233 | return 1; | 223 | return true; |
234 | } | 224 | } |
235 | #endif | 225 | #endif |
236 | 226 | ||
237 | static inline u_int32_t | 227 | static inline u_int32_t |
238 | clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) | 228 | clusterip_hashfn(const struct sk_buff *skb, |
229 | const struct clusterip_config *config) | ||
239 | { | 230 | { |
240 | struct iphdr *iph = ip_hdr(skb); | 231 | const struct iphdr *iph = ip_hdr(skb); |
241 | unsigned long hashval; | 232 | unsigned long hashval; |
242 | u_int16_t sport, dport; | 233 | u_int16_t sport, dport; |
243 | u_int16_t *ports; | 234 | const u_int16_t *ports; |
244 | 235 | ||
245 | switch (iph->protocol) { | 236 | switch (iph->protocol) { |
246 | case IPPROTO_TCP: | 237 | case IPPROTO_TCP: |
@@ -249,15 +240,14 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) | |||
249 | case IPPROTO_SCTP: | 240 | case IPPROTO_SCTP: |
250 | case IPPROTO_DCCP: | 241 | case IPPROTO_DCCP: |
251 | case IPPROTO_ICMP: | 242 | case IPPROTO_ICMP: |
252 | ports = (void *)iph+iph->ihl*4; | 243 | ports = (const void *)iph+iph->ihl*4; |
253 | sport = ports[0]; | 244 | sport = ports[0]; |
254 | dport = ports[1]; | 245 | dport = ports[1]; |
255 | break; | 246 | break; |
256 | default: | 247 | default: |
257 | if (net_ratelimit()) { | 248 | if (net_ratelimit()) |
258 | printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n", | 249 | printk(KERN_NOTICE "CLUSTERIP: unknown protocol `%u'\n", |
259 | iph->protocol); | 250 | iph->protocol); |
260 | } | ||
261 | sport = dport = 0; | 251 | sport = dport = 0; |
262 | } | 252 | } |
263 | 253 | ||
@@ -285,11 +275,11 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) | |||
285 | } | 275 | } |
286 | 276 | ||
287 | /* node numbers are 1..n, not 0..n */ | 277 | /* node numbers are 1..n, not 0..n */ |
288 | return ((hashval % config->num_total_nodes)+1); | 278 | return (hashval % config->num_total_nodes) + 1; |
289 | } | 279 | } |
290 | 280 | ||
291 | static inline int | 281 | static inline int |
292 | clusterip_responsible(struct clusterip_config *config, u_int32_t hash) | 282 | clusterip_responsible(const struct clusterip_config *config, u_int32_t hash) |
293 | { | 283 | { |
294 | return test_bit(hash - 1, &config->local_nodes); | 284 | return test_bit(hash - 1, &config->local_nodes); |
295 | } | 285 | } |
@@ -353,15 +343,15 @@ target(struct sk_buff **pskb, | |||
353 | break; | 343 | break; |
354 | } | 344 | } |
355 | 345 | ||
356 | #ifdef DEBUG_CLUSTERP | 346 | #ifdef DEBUG |
357 | DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 347 | DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
358 | #endif | 348 | #endif |
359 | DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark); | 349 | pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); |
360 | if (!clusterip_responsible(cipinfo->config, hash)) { | 350 | if (!clusterip_responsible(cipinfo->config, hash)) { |
361 | DEBUGP("not responsible\n"); | 351 | pr_debug("not responsible\n"); |
362 | return NF_DROP; | 352 | return NF_DROP; |
363 | } | 353 | } |
364 | DEBUGP("responsible\n"); | 354 | pr_debug("responsible\n"); |
365 | 355 | ||
366 | /* despite being received via linklayer multicast, this is | 356 | /* despite being received via linklayer multicast, this is |
367 | * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ | 357 | * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */ |
@@ -370,7 +360,7 @@ target(struct sk_buff **pskb, | |||
370 | return XT_CONTINUE; | 360 | return XT_CONTINUE; |
371 | } | 361 | } |
372 | 362 | ||
373 | static int | 363 | static bool |
374 | checkentry(const char *tablename, | 364 | checkentry(const char *tablename, |
375 | const void *e_void, | 365 | const void *e_void, |
376 | const struct xt_target *target, | 366 | const struct xt_target *target, |
@@ -387,50 +377,34 @@ checkentry(const char *tablename, | |||
387 | cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) { | 377 | cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) { |
388 | printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n", | 378 | printk(KERN_WARNING "CLUSTERIP: unknown mode `%u'\n", |
389 | cipinfo->hash_mode); | 379 | cipinfo->hash_mode); |
390 | return 0; | 380 | return false; |
391 | 381 | ||
392 | } | 382 | } |
393 | if (e->ip.dmsk.s_addr != htonl(0xffffffff) | 383 | if (e->ip.dmsk.s_addr != htonl(0xffffffff) |
394 | || e->ip.dst.s_addr == 0) { | 384 | || e->ip.dst.s_addr == 0) { |
395 | printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n"); | 385 | printk(KERN_ERR "CLUSTERIP: Please specify destination IP\n"); |
396 | return 0; | 386 | return false; |
397 | } | 387 | } |
398 | 388 | ||
399 | /* FIXME: further sanity checks */ | 389 | /* FIXME: further sanity checks */ |
400 | 390 | ||
401 | config = clusterip_config_find_get(e->ip.dst.s_addr, 1); | 391 | config = clusterip_config_find_get(e->ip.dst.s_addr, 1); |
402 | if (config) { | 392 | if (!config) { |
403 | if (cipinfo->config != NULL) { | ||
404 | /* Case A: This is an entry that gets reloaded, since | ||
405 | * it still has a cipinfo->config pointer. Simply | ||
406 | * increase the entry refcount and return */ | ||
407 | if (cipinfo->config != config) { | ||
408 | printk(KERN_ERR "CLUSTERIP: Reloaded entry " | ||
409 | "has invalid config pointer!\n"); | ||
410 | return 0; | ||
411 | } | ||
412 | } else { | ||
413 | /* Case B: This is a new rule referring to an existing | ||
414 | * clusterip config. */ | ||
415 | cipinfo->config = config; | ||
416 | } | ||
417 | } else { | ||
418 | /* Case C: This is a completely new clusterip config */ | ||
419 | if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { | 393 | if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) { |
420 | printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr)); | 394 | printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr)); |
421 | return 0; | 395 | return false; |
422 | } else { | 396 | } else { |
423 | struct net_device *dev; | 397 | struct net_device *dev; |
424 | 398 | ||
425 | if (e->ip.iniface[0] == '\0') { | 399 | if (e->ip.iniface[0] == '\0') { |
426 | printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n"); | 400 | printk(KERN_WARNING "CLUSTERIP: Please specify an interface name\n"); |
427 | return 0; | 401 | return false; |
428 | } | 402 | } |
429 | 403 | ||
430 | dev = dev_get_by_name(e->ip.iniface); | 404 | dev = dev_get_by_name(e->ip.iniface); |
431 | if (!dev) { | 405 | if (!dev) { |
432 | printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface); | 406 | printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface); |
433 | return 0; | 407 | return false; |
434 | } | 408 | } |
435 | 409 | ||
436 | config = clusterip_config_init(cipinfo, | 410 | config = clusterip_config_init(cipinfo, |
@@ -438,20 +412,20 @@ checkentry(const char *tablename, | |||
438 | if (!config) { | 412 | if (!config) { |
439 | printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n"); | 413 | printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n"); |
440 | dev_put(dev); | 414 | dev_put(dev); |
441 | return 0; | 415 | return false; |
442 | } | 416 | } |
443 | dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); | 417 | dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); |
444 | } | 418 | } |
445 | cipinfo->config = config; | ||
446 | } | 419 | } |
420 | cipinfo->config = config; | ||
447 | 421 | ||
448 | if (nf_ct_l3proto_try_module_get(target->family) < 0) { | 422 | if (nf_ct_l3proto_try_module_get(target->family) < 0) { |
449 | printk(KERN_WARNING "can't load conntrack support for " | 423 | printk(KERN_WARNING "can't load conntrack support for " |
450 | "proto=%d\n", target->family); | 424 | "proto=%d\n", target->family); |
451 | return 0; | 425 | return false; |
452 | } | 426 | } |
453 | 427 | ||
454 | return 1; | 428 | return true; |
455 | } | 429 | } |
456 | 430 | ||
457 | /* drop reference count of cluster config when rule is deleted */ | 431 | /* drop reference count of cluster config when rule is deleted */ |
@@ -468,13 +442,30 @@ static void destroy(const struct xt_target *target, void *targinfo) | |||
468 | nf_ct_l3proto_module_put(target->family); | 442 | nf_ct_l3proto_module_put(target->family); |
469 | } | 443 | } |
470 | 444 | ||
471 | static struct xt_target clusterip_tgt = { | 445 | #ifdef CONFIG_COMPAT |
446 | struct compat_ipt_clusterip_tgt_info | ||
447 | { | ||
448 | u_int32_t flags; | ||
449 | u_int8_t clustermac[6]; | ||
450 | u_int16_t num_total_nodes; | ||
451 | u_int16_t num_local_nodes; | ||
452 | u_int16_t local_nodes[CLUSTERIP_MAX_NODES]; | ||
453 | u_int32_t hash_mode; | ||
454 | u_int32_t hash_initval; | ||
455 | compat_uptr_t config; | ||
456 | }; | ||
457 | #endif /* CONFIG_COMPAT */ | ||
458 | |||
459 | static struct xt_target clusterip_tgt __read_mostly = { | ||
472 | .name = "CLUSTERIP", | 460 | .name = "CLUSTERIP", |
473 | .family = AF_INET, | 461 | .family = AF_INET, |
474 | .target = target, | 462 | .target = target, |
475 | .targetsize = sizeof(struct ipt_clusterip_tgt_info), | ||
476 | .checkentry = checkentry, | 463 | .checkentry = checkentry, |
477 | .destroy = destroy, | 464 | .destroy = destroy, |
465 | .targetsize = sizeof(struct ipt_clusterip_tgt_info), | ||
466 | #ifdef CONFIG_COMPAT | ||
467 | .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info), | ||
468 | #endif /* CONFIG_COMPAT */ | ||
478 | .me = THIS_MODULE | 469 | .me = THIS_MODULE |
479 | }; | 470 | }; |
480 | 471 | ||
@@ -491,7 +482,7 @@ struct arp_payload { | |||
491 | __be32 dst_ip; | 482 | __be32 dst_ip; |
492 | } __attribute__ ((packed)); | 483 | } __attribute__ ((packed)); |
493 | 484 | ||
494 | #ifdef CLUSTERIP_DEBUG | 485 | #ifdef DEBUG |
495 | static void arp_print(struct arp_payload *payload) | 486 | static void arp_print(struct arp_payload *payload) |
496 | { | 487 | { |
497 | #define HBUFFERLEN 30 | 488 | #define HBUFFERLEN 30 |
@@ -547,8 +538,9 @@ arp_mangle(unsigned int hook, | |||
547 | * this wouldn't work, since we didn't subscribe the mcast group on | 538 | * this wouldn't work, since we didn't subscribe the mcast group on |
548 | * other interfaces */ | 539 | * other interfaces */ |
549 | if (c->dev != out) { | 540 | if (c->dev != out) { |
550 | DEBUGP("CLUSTERIP: not mangling arp reply on different " | 541 | pr_debug("CLUSTERIP: not mangling arp reply on different " |
551 | "interface: cip'%s'-skb'%s'\n", c->dev->name, out->name); | 542 | "interface: cip'%s'-skb'%s'\n", |
543 | c->dev->name, out->name); | ||
552 | clusterip_config_put(c); | 544 | clusterip_config_put(c); |
553 | return NF_ACCEPT; | 545 | return NF_ACCEPT; |
554 | } | 546 | } |
@@ -556,8 +548,8 @@ arp_mangle(unsigned int hook, | |||
556 | /* mangle reply hardware address */ | 548 | /* mangle reply hardware address */ |
557 | memcpy(payload->src_hw, c->clustermac, arp->ar_hln); | 549 | memcpy(payload->src_hw, c->clustermac, arp->ar_hln); |
558 | 550 | ||
559 | #ifdef CLUSTERIP_DEBUG | 551 | #ifdef DEBUG |
560 | DEBUGP(KERN_DEBUG "CLUSTERIP mangled arp reply: "); | 552 | pr_debug(KERN_DEBUG "CLUSTERIP mangled arp reply: "); |
561 | arp_print(payload); | 553 | arp_print(payload); |
562 | #endif | 554 | #endif |
563 | 555 | ||
@@ -647,7 +639,7 @@ static int clusterip_seq_show(struct seq_file *s, void *v) | |||
647 | return 0; | 639 | return 0; |
648 | } | 640 | } |
649 | 641 | ||
650 | static struct seq_operations clusterip_seq_ops = { | 642 | static const struct seq_operations clusterip_seq_ops = { |
651 | .start = clusterip_seq_start, | 643 | .start = clusterip_seq_start, |
652 | .next = clusterip_seq_next, | 644 | .next = clusterip_seq_next, |
653 | .stop = clusterip_seq_stop, | 645 | .stop = clusterip_seq_stop, |
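The new compat_ipt_clusterip_tgt_info mirrors the native target info for 32-bit userspace running on a 64-bit kernel: the two layouts differ essentially only in the trailing field, where the kernel pointer is carried as a compat_uptr_t. A sketch of the widening step, assuming a hypothetical helper placed in ipt_CLUSTERIP.c next to the structure definition shown above; the userspace pointer value is not meaningful to the kernel, since checkentry() reassigns cipinfo->config in any case:

    #ifdef CONFIG_COMPAT
    #include <linux/string.h>
    #include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>

    /* Sketch: copy the 32-bit image field for field into the native
     * structure; only the config pointer is left for checkentry(). */
    static void example_compat_from_user(struct ipt_clusterip_tgt_info *dst,
                                         const struct compat_ipt_clusterip_tgt_info *src)
    {
            dst->flags           = src->flags;
            memcpy(dst->clustermac, src->clustermac, sizeof(dst->clustermac));
            dst->num_total_nodes = src->num_total_nodes;
            dst->num_local_nodes = src->num_local_nodes;
            memcpy(dst->local_nodes, src->local_nodes, sizeof(dst->local_nodes));
            dst->hash_mode       = src->hash_mode;
            dst->hash_initval    = src->hash_initval;
            dst->config          = NULL;    /* re-established by checkentry() */
    }
    #endif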
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 918ca92e534a..f1253bd3837f 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -24,8 +24,8 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |||
24 | MODULE_DESCRIPTION("iptables ECN modification module"); | 24 | MODULE_DESCRIPTION("iptables ECN modification module"); |
25 | 25 | ||
26 | /* set ECT codepoint from IP header. | 26 | /* set ECT codepoint from IP header. |
27 | * return 0 if there was an error. */ | 27 | * return false if there was an error. */ |
28 | static inline int | 28 | static inline bool |
29 | set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | 29 | set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) |
30 | { | 30 | { |
31 | struct iphdr *iph = ip_hdr(*pskb); | 31 | struct iphdr *iph = ip_hdr(*pskb); |
@@ -33,18 +33,18 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
33 | if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { | 33 | if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { |
34 | __u8 oldtos; | 34 | __u8 oldtos; |
35 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) | 35 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) |
36 | return 0; | 36 | return false; |
37 | iph = ip_hdr(*pskb); | 37 | iph = ip_hdr(*pskb); |
38 | oldtos = iph->tos; | 38 | oldtos = iph->tos; |
39 | iph->tos &= ~IPT_ECN_IP_MASK; | 39 | iph->tos &= ~IPT_ECN_IP_MASK; |
40 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); | 40 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); |
41 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); | 41 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); |
42 | } | 42 | } |
43 | return 1; | 43 | return true; |
44 | } | 44 | } |
45 | 45 | ||
46 | /* Return 0 if there was an error. */ | 46 | /* Return false if there was an error. */ |
47 | static inline int | 47 | static inline bool |
48 | set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | 48 | set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) |
49 | { | 49 | { |
50 | struct tcphdr _tcph, *tcph; | 50 | struct tcphdr _tcph, *tcph; |
@@ -54,16 +54,16 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
54 | tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb), | 54 | tcph = skb_header_pointer(*pskb, ip_hdrlen(*pskb), |
55 | sizeof(_tcph), &_tcph); | 55 | sizeof(_tcph), &_tcph); |
56 | if (!tcph) | 56 | if (!tcph) |
57 | return 0; | 57 | return false; |
58 | 58 | ||
59 | if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) || | 59 | if ((!(einfo->operation & IPT_ECN_OP_SET_ECE) || |
60 | tcph->ece == einfo->proto.tcp.ece) && | 60 | tcph->ece == einfo->proto.tcp.ece) && |
61 | ((!(einfo->operation & IPT_ECN_OP_SET_CWR) || | 61 | (!(einfo->operation & IPT_ECN_OP_SET_CWR) || |
62 | tcph->cwr == einfo->proto.tcp.cwr))) | 62 | tcph->cwr == einfo->proto.tcp.cwr)) |
63 | return 1; | 63 | return true; |
64 | 64 | ||
65 | if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) | 65 | if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) |
66 | return 0; | 66 | return false; |
67 | tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb); | 67 | tcph = (void *)ip_hdr(*pskb) + ip_hdrlen(*pskb); |
68 | 68 | ||
69 | oldval = ((__be16 *)tcph)[6]; | 69 | oldval = ((__be16 *)tcph)[6]; |
@@ -74,7 +74,7 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
74 | 74 | ||
75 | nf_proto_csum_replace2(&tcph->check, *pskb, | 75 | nf_proto_csum_replace2(&tcph->check, *pskb, |
76 | oldval, ((__be16 *)tcph)[6], 0); | 76 | oldval, ((__be16 *)tcph)[6], 0); |
77 | return 1; | 77 | return true; |
78 | } | 78 | } |
79 | 79 | ||
80 | static unsigned int | 80 | static unsigned int |
@@ -99,7 +99,7 @@ target(struct sk_buff **pskb, | |||
99 | return XT_CONTINUE; | 99 | return XT_CONTINUE; |
100 | } | 100 | } |
101 | 101 | ||
102 | static int | 102 | static bool |
103 | checkentry(const char *tablename, | 103 | checkentry(const char *tablename, |
104 | const void *e_void, | 104 | const void *e_void, |
105 | const struct xt_target *target, | 105 | const struct xt_target *target, |
@@ -112,23 +112,23 @@ checkentry(const char *tablename, | |||
112 | if (einfo->operation & IPT_ECN_OP_MASK) { | 112 | if (einfo->operation & IPT_ECN_OP_MASK) { |
113 | printk(KERN_WARNING "ECN: unsupported ECN operation %x\n", | 113 | printk(KERN_WARNING "ECN: unsupported ECN operation %x\n", |
114 | einfo->operation); | 114 | einfo->operation); |
115 | return 0; | 115 | return false; |
116 | } | 116 | } |
117 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { | 117 | if (einfo->ip_ect & ~IPT_ECN_IP_MASK) { |
118 | printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n", | 118 | printk(KERN_WARNING "ECN: new ECT codepoint %x out of mask\n", |
119 | einfo->ip_ect); | 119 | einfo->ip_ect); |
120 | return 0; | 120 | return false; |
121 | } | 121 | } |
122 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) | 122 | if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) |
123 | && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { | 123 | && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) { |
124 | printk(KERN_WARNING "ECN: cannot use TCP operations on a " | 124 | printk(KERN_WARNING "ECN: cannot use TCP operations on a " |
125 | "non-tcp rule\n"); | 125 | "non-tcp rule\n"); |
126 | return 0; | 126 | return false; |
127 | } | 127 | } |
128 | return 1; | 128 | return true; |
129 | } | 129 | } |
130 | 130 | ||
131 | static struct xt_target ipt_ecn_reg = { | 131 | static struct xt_target ipt_ecn_reg __read_mostly = { |
132 | .name = "ECN", | 132 | .name = "ECN", |
133 | .family = AF_INET, | 133 | .family = AF_INET, |
134 | .target = target, | 134 | .target = target, |
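ipt_ecn_reg, like the other xt_target and xt_match registrations in this series, is now tagged __read_mostly, which groups rarely-written objects into their own data section so they do not share cache lines with frequently-written data. A sketch of the usage pattern; the macro body shown in the comment is the approximate, architecture-dependent definition from <linux/cache.h>, not something introduced by this patch:

    #include <linux/module.h>
    #include <linux/socket.h>
    #include <linux/cache.h>                /* __read_mostly */
    #include <linux/netfilter/x_tables.h>

    /*
     * __read_mostly is roughly
     *   __attribute__((__section__(".data.read_mostly")))
     * on architectures that support it. The registration struct is written
     * once at module load and only read on the packet path afterwards.
     */
    static struct xt_target example_reg __read_mostly = {
            .name   = "EXAMPLE",            /* hypothetical target name */
            .family = AF_INET,
            .me     = THIS_MODULE,
    };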
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index a42c5cd968b1..5937ad150b9f 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -27,12 +27,6 @@ MODULE_LICENSE("GPL"); | |||
27 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 27 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
28 | MODULE_DESCRIPTION("iptables syslog logging module"); | 28 | MODULE_DESCRIPTION("iptables syslog logging module"); |
29 | 29 | ||
30 | #if 0 | ||
31 | #define DEBUGP printk | ||
32 | #else | ||
33 | #define DEBUGP(format, args...) | ||
34 | #endif | ||
35 | |||
36 | /* Use lock to serialize, so printks don't overlap */ | 30 | /* Use lock to serialize, so printks don't overlap */ |
37 | static DEFINE_SPINLOCK(log_lock); | 31 | static DEFINE_SPINLOCK(log_lock); |
38 | 32 | ||
@@ -41,7 +35,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
41 | const struct sk_buff *skb, | 35 | const struct sk_buff *skb, |
42 | unsigned int iphoff) | 36 | unsigned int iphoff) |
43 | { | 37 | { |
44 | struct iphdr _iph, *ih; | 38 | struct iphdr _iph; |
39 | const struct iphdr *ih; | ||
45 | unsigned int logflags; | 40 | unsigned int logflags; |
46 | 41 | ||
47 | if (info->type == NF_LOG_TYPE_LOG) | 42 | if (info->type == NF_LOG_TYPE_LOG) |
@@ -100,7 +95,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
100 | 95 | ||
101 | switch (ih->protocol) { | 96 | switch (ih->protocol) { |
102 | case IPPROTO_TCP: { | 97 | case IPPROTO_TCP: { |
103 | struct tcphdr _tcph, *th; | 98 | struct tcphdr _tcph; |
99 | const struct tcphdr *th; | ||
104 | 100 | ||
105 | /* Max length: 10 "PROTO=TCP " */ | 101 | /* Max length: 10 "PROTO=TCP " */ |
106 | printk("PROTO=TCP "); | 102 | printk("PROTO=TCP "); |
@@ -151,7 +147,7 @@ static void dump_packet(const struct nf_loginfo *info, | |||
151 | if ((logflags & IPT_LOG_TCPOPT) | 147 | if ((logflags & IPT_LOG_TCPOPT) |
152 | && th->doff * 4 > sizeof(struct tcphdr)) { | 148 | && th->doff * 4 > sizeof(struct tcphdr)) { |
153 | unsigned char _opt[4 * 15 - sizeof(struct tcphdr)]; | 149 | unsigned char _opt[4 * 15 - sizeof(struct tcphdr)]; |
154 | unsigned char *op; | 150 | const unsigned char *op; |
155 | unsigned int i, optsize; | 151 | unsigned int i, optsize; |
156 | 152 | ||
157 | optsize = th->doff * 4 - sizeof(struct tcphdr); | 153 | optsize = th->doff * 4 - sizeof(struct tcphdr); |
@@ -173,7 +169,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
173 | } | 169 | } |
174 | case IPPROTO_UDP: | 170 | case IPPROTO_UDP: |
175 | case IPPROTO_UDPLITE: { | 171 | case IPPROTO_UDPLITE: { |
176 | struct udphdr _udph, *uh; | 172 | struct udphdr _udph; |
173 | const struct udphdr *uh; | ||
177 | 174 | ||
178 | if (ih->protocol == IPPROTO_UDP) | 175 | if (ih->protocol == IPPROTO_UDP) |
179 | /* Max length: 10 "PROTO=UDP " */ | 176 | /* Max length: 10 "PROTO=UDP " */ |
@@ -200,7 +197,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
200 | break; | 197 | break; |
201 | } | 198 | } |
202 | case IPPROTO_ICMP: { | 199 | case IPPROTO_ICMP: { |
203 | struct icmphdr _icmph, *ich; | 200 | struct icmphdr _icmph; |
201 | const struct icmphdr *ich; | ||
204 | static const size_t required_len[NR_ICMP_TYPES+1] | 202 | static const size_t required_len[NR_ICMP_TYPES+1] |
205 | = { [ICMP_ECHOREPLY] = 4, | 203 | = { [ICMP_ECHOREPLY] = 4, |
206 | [ICMP_DEST_UNREACH] | 204 | [ICMP_DEST_UNREACH] |
@@ -285,7 +283,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
285 | } | 283 | } |
286 | /* Max Length */ | 284 | /* Max Length */ |
287 | case IPPROTO_AH: { | 285 | case IPPROTO_AH: { |
288 | struct ip_auth_hdr _ahdr, *ah; | 286 | struct ip_auth_hdr _ahdr; |
287 | const struct ip_auth_hdr *ah; | ||
289 | 288 | ||
290 | if (ntohs(ih->frag_off) & IP_OFFSET) | 289 | if (ntohs(ih->frag_off) & IP_OFFSET) |
291 | break; | 290 | break; |
@@ -307,7 +306,8 @@ static void dump_packet(const struct nf_loginfo *info, | |||
307 | break; | 306 | break; |
308 | } | 307 | } |
309 | case IPPROTO_ESP: { | 308 | case IPPROTO_ESP: { |
310 | struct ip_esp_hdr _esph, *eh; | 309 | struct ip_esp_hdr _esph; |
310 | const struct ip_esp_hdr *eh; | ||
311 | 311 | ||
312 | /* Max length: 10 "PROTO=ESP " */ | 312 | /* Max length: 10 "PROTO=ESP " */ |
313 | printk("PROTO=ESP "); | 313 | printk("PROTO=ESP "); |
@@ -385,11 +385,13 @@ ipt_log_packet(unsigned int pf, | |||
385 | out ? out->name : ""); | 385 | out ? out->name : ""); |
386 | #ifdef CONFIG_BRIDGE_NETFILTER | 386 | #ifdef CONFIG_BRIDGE_NETFILTER |
387 | if (skb->nf_bridge) { | 387 | if (skb->nf_bridge) { |
388 | struct net_device *physindev = skb->nf_bridge->physindev; | 388 | const struct net_device *physindev; |
389 | struct net_device *physoutdev = skb->nf_bridge->physoutdev; | 389 | const struct net_device *physoutdev; |
390 | 390 | ||
391 | physindev = skb->nf_bridge->physindev; | ||
391 | if (physindev && in != physindev) | 392 | if (physindev && in != physindev) |
392 | printk("PHYSIN=%s ", physindev->name); | 393 | printk("PHYSIN=%s ", physindev->name); |
394 | physoutdev = skb->nf_bridge->physoutdev; | ||
393 | if (physoutdev && out != physoutdev) | 395 | if (physoutdev && out != physoutdev) |
394 | printk("PHYSOUT=%s ", physoutdev->name); | 396 | printk("PHYSOUT=%s ", physoutdev->name); |
395 | } | 397 | } |
@@ -435,27 +437,27 @@ ipt_log_target(struct sk_buff **pskb, | |||
435 | return XT_CONTINUE; | 437 | return XT_CONTINUE; |
436 | } | 438 | } |
437 | 439 | ||
438 | static int ipt_log_checkentry(const char *tablename, | 440 | static bool ipt_log_checkentry(const char *tablename, |
439 | const void *e, | 441 | const void *e, |
440 | const struct xt_target *target, | 442 | const struct xt_target *target, |
441 | void *targinfo, | 443 | void *targinfo, |
442 | unsigned int hook_mask) | 444 | unsigned int hook_mask) |
443 | { | 445 | { |
444 | const struct ipt_log_info *loginfo = targinfo; | 446 | const struct ipt_log_info *loginfo = targinfo; |
445 | 447 | ||
446 | if (loginfo->level >= 8) { | 448 | if (loginfo->level >= 8) { |
447 | DEBUGP("LOG: level %u >= 8\n", loginfo->level); | 449 | pr_debug("LOG: level %u >= 8\n", loginfo->level); |
448 | return 0; | 450 | return false; |
449 | } | 451 | } |
450 | if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { | 452 | if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { |
451 | DEBUGP("LOG: prefix term %i\n", | 453 | pr_debug("LOG: prefix term %i\n", |
452 | loginfo->prefix[sizeof(loginfo->prefix)-1]); | 454 | loginfo->prefix[sizeof(loginfo->prefix)-1]); |
453 | return 0; | 455 | return false; |
454 | } | 456 | } |
455 | return 1; | 457 | return true; |
456 | } | 458 | } |
457 | 459 | ||
458 | static struct xt_target ipt_log_reg = { | 460 | static struct xt_target ipt_log_reg __read_mostly = { |
459 | .name = "LOG", | 461 | .name = "LOG", |
460 | .family = AF_INET, | 462 | .family = AF_INET, |
461 | .target = ipt_log_target, | 463 | .target = ipt_log_target, |
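The LOG target, like MASQUERADE, NETMAP, REDIRECT and SAME below, drops its private DEBUGP macro in favour of the generic pr_debug(). The sketch below shows the approximate behaviour of pr_debug() in this era's <linux/kernel.h>; it is a paraphrase, not the exact definition:

    /*
     * Approximate behaviour of pr_debug(): the call expands to a
     * KERN_DEBUG printk only when DEBUG is defined for the translation
     * unit, and costs nothing at runtime otherwise.
     */
    #ifdef DEBUG
    #define pr_debug(fmt, arg...)   printk(KERN_DEBUG fmt, ##arg)
    #else
    #define pr_debug(fmt, arg...)   do { } while (0)
    #endif

In the files above whose DEBUGP was wrapped in "#if 0", the default behaviour is therefore unchanged; debug output now comes back by defining DEBUG for the file instead of editing a local macro.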
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index d4f2d7775330..7c4e4be7c8b3 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -27,17 +27,11 @@ MODULE_LICENSE("GPL"); | |||
27 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 27 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
28 | MODULE_DESCRIPTION("iptables MASQUERADE target module"); | 28 | MODULE_DESCRIPTION("iptables MASQUERADE target module"); |
29 | 29 | ||
30 | #if 0 | ||
31 | #define DEBUGP printk | ||
32 | #else | ||
33 | #define DEBUGP(format, args...) | ||
34 | #endif | ||
35 | |||
36 | /* Lock protects masq region inside conntrack */ | 30 | /* Lock protects masq region inside conntrack */ |
37 | static DEFINE_RWLOCK(masq_lock); | 31 | static DEFINE_RWLOCK(masq_lock); |
38 | 32 | ||
39 | /* FIXME: Multiple targets. --RR */ | 33 | /* FIXME: Multiple targets. --RR */ |
40 | static int | 34 | static bool |
41 | masquerade_check(const char *tablename, | 35 | masquerade_check(const char *tablename, |
42 | const void *e, | 36 | const void *e, |
43 | const struct xt_target *target, | 37 | const struct xt_target *target, |
@@ -47,14 +41,14 @@ masquerade_check(const char *tablename, | |||
47 | const struct nf_nat_multi_range_compat *mr = targinfo; | 41 | const struct nf_nat_multi_range_compat *mr = targinfo; |
48 | 42 | ||
49 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { | 43 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { |
50 | DEBUGP("masquerade_check: bad MAP_IPS.\n"); | 44 | pr_debug("masquerade_check: bad MAP_IPS.\n"); |
51 | return 0; | 45 | return false; |
52 | } | 46 | } |
53 | if (mr->rangesize != 1) { | 47 | if (mr->rangesize != 1) { |
54 | DEBUGP("masquerade_check: bad rangesize %u.\n", mr->rangesize); | 48 | pr_debug("masquerade_check: bad rangesize %u\n", mr->rangesize); |
55 | return 0; | 49 | return false; |
56 | } | 50 | } |
57 | return 1; | 51 | return true; |
58 | } | 52 | } |
59 | 53 | ||
60 | static unsigned int | 54 | static unsigned int |
@@ -70,7 +64,7 @@ masquerade_target(struct sk_buff **pskb, | |||
70 | enum ip_conntrack_info ctinfo; | 64 | enum ip_conntrack_info ctinfo; |
71 | struct nf_nat_range newrange; | 65 | struct nf_nat_range newrange; |
72 | const struct nf_nat_multi_range_compat *mr; | 66 | const struct nf_nat_multi_range_compat *mr; |
73 | struct rtable *rt; | 67 | const struct rtable *rt; |
74 | __be32 newsrc; | 68 | __be32 newsrc; |
75 | 69 | ||
76 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); | 70 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); |
@@ -109,10 +103,10 @@ masquerade_target(struct sk_buff **pskb, | |||
109 | return nf_nat_setup_info(ct, &newrange, hooknum); | 103 | return nf_nat_setup_info(ct, &newrange, hooknum); |
110 | } | 104 | } |
111 | 105 | ||
112 | static inline int | 106 | static int |
113 | device_cmp(struct nf_conn *i, void *ifindex) | 107 | device_cmp(struct nf_conn *i, void *ifindex) |
114 | { | 108 | { |
115 | struct nf_conn_nat *nat = nfct_nat(i); | 109 | const struct nf_conn_nat *nat = nfct_nat(i); |
116 | int ret; | 110 | int ret; |
117 | 111 | ||
118 | if (!nat) | 112 | if (!nat) |
@@ -129,7 +123,7 @@ static int masq_device_event(struct notifier_block *this, | |||
129 | unsigned long event, | 123 | unsigned long event, |
130 | void *ptr) | 124 | void *ptr) |
131 | { | 125 | { |
132 | struct net_device *dev = ptr; | 126 | const struct net_device *dev = ptr; |
133 | 127 | ||
134 | if (event == NETDEV_DOWN) { | 128 | if (event == NETDEV_DOWN) { |
135 | /* Device was downed. Search entire table for | 129 | /* Device was downed. Search entire table for |
@@ -147,7 +141,7 @@ static int masq_inet_event(struct notifier_block *this, | |||
147 | unsigned long event, | 141 | unsigned long event, |
148 | void *ptr) | 142 | void *ptr) |
149 | { | 143 | { |
150 | struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; | 144 | const struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; |
151 | 145 | ||
152 | if (event == NETDEV_DOWN) { | 146 | if (event == NETDEV_DOWN) { |
153 | /* IP address was deleted. Search entire table for | 147 | /* IP address was deleted. Search entire table for |
@@ -169,7 +163,7 @@ static struct notifier_block masq_inet_notifier = { | |||
169 | .notifier_call = masq_inet_event, | 163 | .notifier_call = masq_inet_event, |
170 | }; | 164 | }; |
171 | 165 | ||
172 | static struct xt_target masquerade = { | 166 | static struct xt_target masquerade __read_mostly = { |
173 | .name = "MASQUERADE", | 167 | .name = "MASQUERADE", |
174 | .family = AF_INET, | 168 | .family = AF_INET, |
175 | .target = masquerade_target, | 169 | .target = masquerade_target, |
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index 068c69bce30e..41a011d5a065 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c | |||
@@ -18,18 +18,11 @@ | |||
18 | #include <linux/netfilter/x_tables.h> | 18 | #include <linux/netfilter/x_tables.h> |
19 | #include <net/netfilter/nf_nat_rule.h> | 19 | #include <net/netfilter/nf_nat_rule.h> |
20 | 20 | ||
21 | #define MODULENAME "NETMAP" | ||
22 | MODULE_LICENSE("GPL"); | 21 | MODULE_LICENSE("GPL"); |
23 | MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); | 22 | MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); |
24 | MODULE_DESCRIPTION("iptables 1:1 NAT mapping of IP networks target"); | 23 | MODULE_DESCRIPTION("iptables 1:1 NAT mapping of IP networks target"); |
25 | 24 | ||
26 | #if 0 | 25 | static bool |
27 | #define DEBUGP printk | ||
28 | #else | ||
29 | #define DEBUGP(format, args...) | ||
30 | #endif | ||
31 | |||
32 | static int | ||
33 | check(const char *tablename, | 26 | check(const char *tablename, |
34 | const void *e, | 27 | const void *e, |
35 | const struct xt_target *target, | 28 | const struct xt_target *target, |
@@ -39,14 +32,14 @@ check(const char *tablename, | |||
39 | const struct nf_nat_multi_range_compat *mr = targinfo; | 32 | const struct nf_nat_multi_range_compat *mr = targinfo; |
40 | 33 | ||
41 | if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { | 34 | if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { |
42 | DEBUGP(MODULENAME":check: bad MAP_IPS.\n"); | 35 | pr_debug("NETMAP:check: bad MAP_IPS.\n"); |
43 | return 0; | 36 | return false; |
44 | } | 37 | } |
45 | if (mr->rangesize != 1) { | 38 | if (mr->rangesize != 1) { |
46 | DEBUGP(MODULENAME":check: bad rangesize %u.\n", mr->rangesize); | 39 | pr_debug("NETMAP:check: bad rangesize %u.\n", mr->rangesize); |
47 | return 0; | 40 | return false; |
48 | } | 41 | } |
49 | return 1; | 42 | return true; |
50 | } | 43 | } |
51 | 44 | ||
52 | static unsigned int | 45 | static unsigned int |
@@ -85,8 +78,8 @@ target(struct sk_buff **pskb, | |||
85 | return nf_nat_setup_info(ct, &newrange, hooknum); | 78 | return nf_nat_setup_info(ct, &newrange, hooknum); |
86 | } | 79 | } |
87 | 80 | ||
88 | static struct xt_target target_module = { | 81 | static struct xt_target target_module __read_mostly = { |
89 | .name = MODULENAME, | 82 | .name = "NETMAP", |
90 | .family = AF_INET, | 83 | .family = AF_INET, |
91 | .target = target, | 84 | .target = target, |
92 | .targetsize = sizeof(struct nf_nat_multi_range_compat), | 85 | .targetsize = sizeof(struct nf_nat_multi_range_compat), |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index 68cc76a198eb..6ac7a2373316 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c | |||
@@ -25,14 +25,8 @@ MODULE_LICENSE("GPL"); | |||
25 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 25 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
26 | MODULE_DESCRIPTION("iptables REDIRECT target module"); | 26 | MODULE_DESCRIPTION("iptables REDIRECT target module"); |
27 | 27 | ||
28 | #if 0 | ||
29 | #define DEBUGP printk | ||
30 | #else | ||
31 | #define DEBUGP(format, args...) | ||
32 | #endif | ||
33 | |||
34 | /* FIXME: Take multiple ranges --RR */ | 28 | /* FIXME: Take multiple ranges --RR */ |
35 | static int | 29 | static bool |
36 | redirect_check(const char *tablename, | 30 | redirect_check(const char *tablename, |
37 | const void *e, | 31 | const void *e, |
38 | const struct xt_target *target, | 32 | const struct xt_target *target, |
@@ -42,14 +36,14 @@ redirect_check(const char *tablename, | |||
42 | const struct nf_nat_multi_range_compat *mr = targinfo; | 36 | const struct nf_nat_multi_range_compat *mr = targinfo; |
43 | 37 | ||
44 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { | 38 | if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) { |
45 | DEBUGP("redirect_check: bad MAP_IPS.\n"); | 39 | pr_debug("redirect_check: bad MAP_IPS.\n"); |
46 | return 0; | 40 | return false; |
47 | } | 41 | } |
48 | if (mr->rangesize != 1) { | 42 | if (mr->rangesize != 1) { |
49 | DEBUGP("redirect_check: bad rangesize %u.\n", mr->rangesize); | 43 | pr_debug("redirect_check: bad rangesize %u.\n", mr->rangesize); |
50 | return 0; | 44 | return false; |
51 | } | 45 | } |
52 | return 1; | 46 | return true; |
53 | } | 47 | } |
54 | 48 | ||
55 | static unsigned int | 49 | static unsigned int |
@@ -101,7 +95,7 @@ redirect_target(struct sk_buff **pskb, | |||
101 | return nf_nat_setup_info(ct, &newrange, hooknum); | 95 | return nf_nat_setup_info(ct, &newrange, hooknum); |
102 | } | 96 | } |
103 | 97 | ||
104 | static struct xt_target redirect_reg = { | 98 | static struct xt_target redirect_reg __read_mostly = { |
105 | .name = "REDIRECT", | 99 | .name = "REDIRECT", |
106 | .family = AF_INET, | 100 | .family = AF_INET, |
107 | .target = redirect_target, | 101 | .target = redirect_target, |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 9041e0741f6f..cb038c8fbc9d 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -31,12 +31,6 @@ MODULE_LICENSE("GPL"); | |||
31 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 31 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
32 | MODULE_DESCRIPTION("iptables REJECT target module"); | 32 | MODULE_DESCRIPTION("iptables REJECT target module"); |
33 | 33 | ||
34 | #if 0 | ||
35 | #define DEBUGP printk | ||
36 | #else | ||
37 | #define DEBUGP(format, args...) | ||
38 | #endif | ||
39 | |||
40 | /* Send RST reply */ | 34 | /* Send RST reply */ |
41 | static void send_reset(struct sk_buff *oldskb, int hook) | 35 | static void send_reset(struct sk_buff *oldskb, int hook) |
42 | { | 36 | { |
@@ -122,7 +116,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
122 | tcph->check = 0; | 116 | tcph->check = 0; |
123 | tcph->check = tcp_v4_check(sizeof(struct tcphdr), | 117 | tcph->check = tcp_v4_check(sizeof(struct tcphdr), |
124 | niph->saddr, niph->daddr, | 118 | niph->saddr, niph->daddr, |
125 | csum_partial((char *)tcph, | 119 | csum_partial(tcph, |
126 | sizeof(struct tcphdr), 0)); | 120 | sizeof(struct tcphdr), 0)); |
127 | 121 | ||
128 | /* Set DF, id = 0 */ | 122 | /* Set DF, id = 0 */ |
@@ -217,30 +211,30 @@ static unsigned int reject(struct sk_buff **pskb, | |||
217 | return NF_DROP; | 211 | return NF_DROP; |
218 | } | 212 | } |
219 | 213 | ||
220 | static int check(const char *tablename, | 214 | static bool check(const char *tablename, |
221 | const void *e_void, | 215 | const void *e_void, |
222 | const struct xt_target *target, | 216 | const struct xt_target *target, |
223 | void *targinfo, | 217 | void *targinfo, |
224 | unsigned int hook_mask) | 218 | unsigned int hook_mask) |
225 | { | 219 | { |
226 | const struct ipt_reject_info *rejinfo = targinfo; | 220 | const struct ipt_reject_info *rejinfo = targinfo; |
227 | const struct ipt_entry *e = e_void; | 221 | const struct ipt_entry *e = e_void; |
228 | 222 | ||
229 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { | 223 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { |
230 | printk("REJECT: ECHOREPLY no longer supported.\n"); | 224 | printk("ipt_REJECT: ECHOREPLY no longer supported.\n"); |
231 | return 0; | 225 | return false; |
232 | } else if (rejinfo->with == IPT_TCP_RESET) { | 226 | } else if (rejinfo->with == IPT_TCP_RESET) { |
233 | /* Must specify that it's a TCP packet */ | 227 | /* Must specify that it's a TCP packet */ |
234 | if (e->ip.proto != IPPROTO_TCP | 228 | if (e->ip.proto != IPPROTO_TCP |
235 | || (e->ip.invflags & XT_INV_PROTO)) { | 229 | || (e->ip.invflags & XT_INV_PROTO)) { |
236 | DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n"); | 230 | printk("ipt_REJECT: TCP_RESET invalid for non-tcp\n"); |
237 | return 0; | 231 | return false; |
238 | } | 232 | } |
239 | } | 233 | } |
240 | return 1; | 234 | return true; |
241 | } | 235 | } |
242 | 236 | ||
243 | static struct xt_target ipt_reject_reg = { | 237 | static struct xt_target ipt_reject_reg __read_mostly = { |
244 | .name = "REJECT", | 238 | .name = "REJECT", |
245 | .family = AF_INET, | 239 | .family = AF_INET, |
246 | .target = reject, | 240 | .target = reject, |
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c index 511e5ff84938..97641f1a97f6 100644 --- a/net/ipv4/netfilter/ipt_SAME.c +++ b/net/ipv4/netfilter/ipt_SAME.c | |||
@@ -27,13 +27,7 @@ MODULE_LICENSE("GPL"); | |||
27 | MODULE_AUTHOR("Martin Josefsson <gandalf@wlug.westbo.se>"); | 27 | MODULE_AUTHOR("Martin Josefsson <gandalf@wlug.westbo.se>"); |
28 | MODULE_DESCRIPTION("iptables special SNAT module for consistent sourceip"); | 28 | MODULE_DESCRIPTION("iptables special SNAT module for consistent sourceip"); |
29 | 29 | ||
30 | #if 0 | 30 | static bool |
31 | #define DEBUGP printk | ||
32 | #else | ||
33 | #define DEBUGP(format, args...) | ||
34 | #endif | ||
35 | |||
36 | static int | ||
37 | same_check(const char *tablename, | 31 | same_check(const char *tablename, |
38 | const void *e, | 32 | const void *e, |
39 | const struct xt_target *target, | 33 | const struct xt_target *target, |
@@ -46,58 +40,56 @@ same_check(const char *tablename, | |||
46 | mr->ipnum = 0; | 40 | mr->ipnum = 0; |
47 | 41 | ||
48 | if (mr->rangesize < 1) { | 42 | if (mr->rangesize < 1) { |
49 | DEBUGP("same_check: need at least one dest range.\n"); | 43 | pr_debug("same_check: need at least one dest range.\n"); |
50 | return 0; | 44 | return false; |
51 | } | 45 | } |
52 | if (mr->rangesize > IPT_SAME_MAX_RANGE) { | 46 | if (mr->rangesize > IPT_SAME_MAX_RANGE) { |
53 | DEBUGP("same_check: too many ranges specified, maximum " | 47 | pr_debug("same_check: too many ranges specified, maximum " |
54 | "is %u ranges\n", | 48 | "is %u ranges\n", IPT_SAME_MAX_RANGE); |
55 | IPT_SAME_MAX_RANGE); | 49 | return false; |
56 | return 0; | ||
57 | } | 50 | } |
58 | for (count = 0; count < mr->rangesize; count++) { | 51 | for (count = 0; count < mr->rangesize; count++) { |
59 | if (ntohl(mr->range[count].min_ip) > | 52 | if (ntohl(mr->range[count].min_ip) > |
60 | ntohl(mr->range[count].max_ip)) { | 53 | ntohl(mr->range[count].max_ip)) { |
61 | DEBUGP("same_check: min_ip is larger than max_ip in " | 54 | pr_debug("same_check: min_ip is larger than max_ip in " |
62 | "range `%u.%u.%u.%u-%u.%u.%u.%u'.\n", | 55 | "range `%u.%u.%u.%u-%u.%u.%u.%u'.\n", |
63 | NIPQUAD(mr->range[count].min_ip), | 56 | NIPQUAD(mr->range[count].min_ip), |
64 | NIPQUAD(mr->range[count].max_ip)); | 57 | NIPQUAD(mr->range[count].max_ip)); |
65 | return 0; | 58 | return false; |
66 | } | 59 | } |
67 | if (!(mr->range[count].flags & IP_NAT_RANGE_MAP_IPS)) { | 60 | if (!(mr->range[count].flags & IP_NAT_RANGE_MAP_IPS)) { |
68 | DEBUGP("same_check: bad MAP_IPS.\n"); | 61 | pr_debug("same_check: bad MAP_IPS.\n"); |
69 | return 0; | 62 | return false; |
70 | } | 63 | } |
71 | rangeip = (ntohl(mr->range[count].max_ip) - | 64 | rangeip = (ntohl(mr->range[count].max_ip) - |
72 | ntohl(mr->range[count].min_ip) + 1); | 65 | ntohl(mr->range[count].min_ip) + 1); |
73 | mr->ipnum += rangeip; | 66 | mr->ipnum += rangeip; |
74 | 67 | ||
75 | DEBUGP("same_check: range %u, ipnum = %u\n", count, rangeip); | 68 | pr_debug("same_check: range %u, ipnum = %u\n", count, rangeip); |
76 | } | 69 | } |
77 | DEBUGP("same_check: total ipaddresses = %u\n", mr->ipnum); | 70 | pr_debug("same_check: total ipaddresses = %u\n", mr->ipnum); |
78 | 71 | ||
79 | mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL); | 72 | mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL); |
80 | if (!mr->iparray) { | 73 | if (!mr->iparray) { |
81 | DEBUGP("same_check: Couldn't allocate %u bytes " | 74 | pr_debug("same_check: Couldn't allocate %Zu bytes " |
82 | "for %u ipaddresses!\n", | 75 | "for %u ipaddresses!\n", |
83 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); | 76 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); |
84 | return 0; | 77 | return false; |
85 | } | 78 | } |
86 | DEBUGP("same_check: Allocated %u bytes for %u ipaddresses.\n", | 79 | pr_debug("same_check: Allocated %Zu bytes for %u ipaddresses.\n", |
87 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); | 80 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); |
88 | 81 | ||
89 | for (count = 0; count < mr->rangesize; count++) { | 82 | for (count = 0; count < mr->rangesize; count++) { |
90 | for (countess = ntohl(mr->range[count].min_ip); | 83 | for (countess = ntohl(mr->range[count].min_ip); |
91 | countess <= ntohl(mr->range[count].max_ip); | 84 | countess <= ntohl(mr->range[count].max_ip); |
92 | countess++) { | 85 | countess++) { |
93 | mr->iparray[index] = countess; | 86 | mr->iparray[index] = countess; |
94 | DEBUGP("same_check: Added ipaddress `%u.%u.%u.%u' " | 87 | pr_debug("same_check: Added ipaddress `%u.%u.%u.%u' " |
95 | "in index %u.\n", | 88 | "in index %u.\n", HIPQUAD(countess), index); |
96 | HIPQUAD(countess), index); | ||
97 | index++; | 89 | index++; |
98 | } | 90 | } |
99 | } | 91 | } |
100 | return 1; | 92 | return true; |
101 | } | 93 | } |
102 | 94 | ||
103 | static void | 95 | static void |
@@ -107,8 +99,8 @@ same_destroy(const struct xt_target *target, void *targinfo) | |||
107 | 99 | ||
108 | kfree(mr->iparray); | 100 | kfree(mr->iparray); |
109 | 101 | ||
110 | DEBUGP("same_destroy: Deallocated %u bytes for %u ipaddresses.\n", | 102 | pr_debug("same_destroy: Deallocated %Zu bytes for %u ipaddresses.\n", |
111 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); | 103 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); |
112 | } | 104 | } |
113 | 105 | ||
114 | static unsigned int | 106 | static unsigned int |
@@ -146,10 +138,9 @@ same_target(struct sk_buff **pskb, | |||
146 | 138 | ||
147 | new_ip = htonl(same->iparray[aindex]); | 139 | new_ip = htonl(same->iparray[aindex]); |
148 | 140 | ||
149 | DEBUGP("ipt_SAME: src=%u.%u.%u.%u dst=%u.%u.%u.%u, " | 141 | pr_debug("ipt_SAME: src=%u.%u.%u.%u dst=%u.%u.%u.%u, " |
150 | "new src=%u.%u.%u.%u\n", | 142 | "new src=%u.%u.%u.%u\n", |
151 | NIPQUAD(t->src.ip), NIPQUAD(t->dst.ip), | 143 | NIPQUAD(t->src.u3.ip), NIPQUAD(t->dst.u3.ip), NIPQUAD(new_ip)); |
152 | NIPQUAD(new_ip)); | ||
153 | 144 | ||
154 | /* Transfer from original range. */ | 145 | /* Transfer from original range. */ |
155 | newrange = ((struct nf_nat_range) | 146 | newrange = ((struct nf_nat_range) |
@@ -161,7 +152,7 @@ same_target(struct sk_buff **pskb, | |||
161 | return nf_nat_setup_info(ct, &newrange, hooknum); | 152 | return nf_nat_setup_info(ct, &newrange, hooknum); |
162 | } | 153 | } |
163 | 154 | ||
164 | static struct xt_target same_reg = { | 155 | static struct xt_target same_reg __read_mostly = { |
165 | .name = "SAME", | 156 | .name = "SAME", |
166 | .family = AF_INET, | 157 | .family = AF_INET, |
167 | .target = same_target, | 158 | .target = same_target, |
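same_target() and same_check() print addresses with NIPQUAD(), the helper used throughout these files to split a __be32 into four byte-sized arguments for a "%u.%u.%u.%u" format. A sketch for orientation; the macro body is the approximate <linux/kernel.h> definition of the time and the wrapper function is hypothetical:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Approximately:
     * #define NIPQUAD(addr) \
     *         ((unsigned char *)&addr)[0], \
     *         ((unsigned char *)&addr)[1], \
     *         ((unsigned char *)&addr)[2], \
     *         ((unsigned char *)&addr)[3]
     */
    static void example_print_addr(__be32 addr)
    {
            /* four %u fields, one per byte, in network byte order */
            printk(KERN_DEBUG "addr=%u.%u.%u.%u\n", NIPQUAD(addr));
    }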
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c index 0ad02f249837..25f5d0b39065 100644 --- a/net/ipv4/netfilter/ipt_TOS.c +++ b/net/ipv4/netfilter/ipt_TOS.c | |||
@@ -43,7 +43,7 @@ target(struct sk_buff **pskb, | |||
43 | return XT_CONTINUE; | 43 | return XT_CONTINUE; |
44 | } | 44 | } |
45 | 45 | ||
46 | static int | 46 | static bool |
47 | checkentry(const char *tablename, | 47 | checkentry(const char *tablename, |
48 | const void *e_void, | 48 | const void *e_void, |
49 | const struct xt_target *target, | 49 | const struct xt_target *target, |
@@ -58,12 +58,12 @@ checkentry(const char *tablename, | |||
58 | && tos != IPTOS_MINCOST | 58 | && tos != IPTOS_MINCOST |
59 | && tos != IPTOS_NORMALSVC) { | 59 | && tos != IPTOS_NORMALSVC) { |
60 | printk(KERN_WARNING "TOS: bad tos value %#x\n", tos); | 60 | printk(KERN_WARNING "TOS: bad tos value %#x\n", tos); |
61 | return 0; | 61 | return false; |
62 | } | 62 | } |
63 | return 1; | 63 | return true; |
64 | } | 64 | } |
65 | 65 | ||
66 | static struct xt_target ipt_tos_reg = { | 66 | static struct xt_target ipt_tos_reg __read_mostly = { |
67 | .name = "TOS", | 67 | .name = "TOS", |
68 | .family = AF_INET, | 68 | .family = AF_INET, |
69 | .target = target, | 69 | .target = target, |
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c index a991ec7bd4e7..2b54e7b0cfe8 100644 --- a/net/ipv4/netfilter/ipt_TTL.c +++ b/net/ipv4/netfilter/ipt_TTL.c | |||
@@ -62,25 +62,25 @@ ipt_ttl_target(struct sk_buff **pskb, | |||
62 | return XT_CONTINUE; | 62 | return XT_CONTINUE; |
63 | } | 63 | } |
64 | 64 | ||
65 | static int ipt_ttl_checkentry(const char *tablename, | 65 | static bool ipt_ttl_checkentry(const char *tablename, |
66 | const void *e, | 66 | const void *e, |
67 | const struct xt_target *target, | 67 | const struct xt_target *target, |
68 | void *targinfo, | 68 | void *targinfo, |
69 | unsigned int hook_mask) | 69 | unsigned int hook_mask) |
70 | { | 70 | { |
71 | struct ipt_TTL_info *info = targinfo; | 71 | const struct ipt_TTL_info *info = targinfo; |
72 | 72 | ||
73 | if (info->mode > IPT_TTL_MAXMODE) { | 73 | if (info->mode > IPT_TTL_MAXMODE) { |
74 | printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", | 74 | printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", |
75 | info->mode); | 75 | info->mode); |
76 | return 0; | 76 | return false; |
77 | } | 77 | } |
78 | if ((info->mode != IPT_TTL_SET) && (info->ttl == 0)) | 78 | if (info->mode != IPT_TTL_SET && info->ttl == 0) |
79 | return 0; | 79 | return false; |
80 | return 1; | 80 | return true; |
81 | } | 81 | } |
82 | 82 | ||
83 | static struct xt_target ipt_TTL = { | 83 | static struct xt_target ipt_TTL __read_mostly = { |
84 | .name = "TTL", | 84 | .name = "TTL", |
85 | .family = AF_INET, | 85 | .family = AF_INET, |
86 | .target = ipt_ttl_target, | 86 | .target = ipt_ttl_target, |
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 23b607b33b32..6ca43e4ca7e3 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -55,13 +55,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG); | |||
55 | #define ULOG_NL_EVENT 111 /* Harald's favorite number */ | 55 | #define ULOG_NL_EVENT 111 /* Harald's favorite number */ |
56 | #define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */ | 56 | #define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */ |
57 | 57 | ||
58 | #if 0 | ||
59 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ | ||
60 | __FILE__, __FUNCTION__ , ## args) | ||
61 | #else | ||
62 | #define DEBUGP(format, args...) | ||
63 | #endif | ||
64 | |||
65 | #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) | 58 | #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) |
66 | 59 | ||
67 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; | 60 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; |
@@ -96,12 +89,12 @@ static void ulog_send(unsigned int nlgroupnum) | |||
96 | ulog_buff_t *ub = &ulog_buffers[nlgroupnum]; | 89 | ulog_buff_t *ub = &ulog_buffers[nlgroupnum]; |
97 | 90 | ||
98 | if (timer_pending(&ub->timer)) { | 91 | if (timer_pending(&ub->timer)) { |
99 | DEBUGP("ipt_ULOG: ulog_send: timer was pending, deleting\n"); | 92 | pr_debug("ipt_ULOG: ulog_send: timer was pending, deleting\n"); |
100 | del_timer(&ub->timer); | 93 | del_timer(&ub->timer); |
101 | } | 94 | } |
102 | 95 | ||
103 | if (!ub->skb) { | 96 | if (!ub->skb) { |
104 | DEBUGP("ipt_ULOG: ulog_send: nothing to send\n"); | 97 | pr_debug("ipt_ULOG: ulog_send: nothing to send\n"); |
105 | return; | 98 | return; |
106 | } | 99 | } |
107 | 100 | ||
@@ -110,8 +103,8 @@ static void ulog_send(unsigned int nlgroupnum) | |||
110 | ub->lastnlh->nlmsg_type = NLMSG_DONE; | 103 | ub->lastnlh->nlmsg_type = NLMSG_DONE; |
111 | 104 | ||
112 | NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1; | 105 | NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1; |
113 | DEBUGP("ipt_ULOG: throwing %d packets to netlink group %u\n", | 106 | pr_debug("ipt_ULOG: throwing %d packets to netlink group %u\n", |
114 | ub->qlen, nlgroupnum + 1); | 107 | ub->qlen, nlgroupnum + 1); |
115 | netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC); | 108 | netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC); |
116 | 109 | ||
117 | ub->qlen = 0; | 110 | ub->qlen = 0; |
@@ -123,7 +116,7 @@ static void ulog_send(unsigned int nlgroupnum) | |||
123 | /* timer function to flush queue in flushtimeout time */ | 116 | /* timer function to flush queue in flushtimeout time */ |
124 | static void ulog_timer(unsigned long data) | 117 | static void ulog_timer(unsigned long data) |
125 | { | 118 | { |
126 | DEBUGP("ipt_ULOG: timer function called, calling ulog_send\n"); | 119 | pr_debug("ipt_ULOG: timer function called, calling ulog_send\n"); |
127 | 120 | ||
128 | /* lock to protect against somebody modifying our structure | 121 | /* lock to protect against somebody modifying our structure |
129 | * from ipt_ulog_target at the same time */ | 122 | * from ipt_ulog_target at the same time */ |
@@ -179,12 +172,10 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
179 | unsigned int groupnum = ffs(loginfo->nl_group) - 1; | 172 | unsigned int groupnum = ffs(loginfo->nl_group) - 1; |
180 | 173 | ||
181 | /* calculate the size of the skb needed */ | 174 | /* calculate the size of the skb needed */ |
182 | if ((loginfo->copy_range == 0) || | 175 | if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len) |
183 | (loginfo->copy_range > skb->len)) { | ||
184 | copy_len = skb->len; | 176 | copy_len = skb->len; |
185 | } else { | 177 | else |
186 | copy_len = loginfo->copy_range; | 178 | copy_len = loginfo->copy_range; |
187 | } | ||
188 | 179 | ||
189 | size = NLMSG_SPACE(sizeof(*pm) + copy_len); | 180 | size = NLMSG_SPACE(sizeof(*pm) + copy_len); |
190 | 181 | ||
@@ -206,8 +197,8 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
206 | goto alloc_failure; | 197 | goto alloc_failure; |
207 | } | 198 | } |
208 | 199 | ||
209 | DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen, | 200 | pr_debug("ipt_ULOG: qlen %d, qthreshold %Zu\n", ub->qlen, |
210 | loginfo->qthreshold); | 201 | loginfo->qthreshold); |
211 | 202 | ||
212 | /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ | 203 | /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ |
213 | nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, | 204 | nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, |
@@ -257,9 +248,8 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
257 | BUG(); | 248 | BUG(); |
258 | 249 | ||
259 | /* check if we are building multi-part messages */ | 250 | /* check if we are building multi-part messages */ |
260 | if (ub->qlen > 1) { | 251 | if (ub->qlen > 1) |
261 | ub->lastnlh->nlmsg_flags |= NLM_F_MULTI; | 252 | ub->lastnlh->nlmsg_flags |= NLM_F_MULTI; |
262 | } | ||
263 | 253 | ||
264 | ub->lastnlh = nlh; | 254 | ub->lastnlh = nlh; |
265 | 255 | ||
@@ -328,25 +318,25 @@ static void ipt_logfn(unsigned int pf, | |||
328 | ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); | 318 | ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); |
329 | } | 319 | } |
330 | 320 | ||
331 | static int ipt_ulog_checkentry(const char *tablename, | 321 | static bool ipt_ulog_checkentry(const char *tablename, |
332 | const void *e, | 322 | const void *e, |
333 | const struct xt_target *target, | 323 | const struct xt_target *target, |
334 | void *targinfo, | 324 | void *targinfo, |
335 | unsigned int hookmask) | 325 | unsigned int hookmask) |
336 | { | 326 | { |
337 | struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo; | 327 | const struct ipt_ulog_info *loginfo = targinfo; |
338 | 328 | ||
339 | if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') { | 329 | if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') { |
340 | DEBUGP("ipt_ULOG: prefix term %i\n", | 330 | pr_debug("ipt_ULOG: prefix term %i\n", |
341 | loginfo->prefix[sizeof(loginfo->prefix) - 1]); | 331 | loginfo->prefix[sizeof(loginfo->prefix) - 1]); |
342 | return 0; | 332 | return false; |
343 | } | 333 | } |
344 | if (loginfo->qthreshold > ULOG_MAX_QLEN) { | 334 | if (loginfo->qthreshold > ULOG_MAX_QLEN) { |
345 | DEBUGP("ipt_ULOG: queue threshold %i > MAX_QLEN\n", | 335 | pr_debug("ipt_ULOG: queue threshold %Zu > MAX_QLEN\n", |
346 | loginfo->qthreshold); | 336 | loginfo->qthreshold); |
347 | return 0; | 337 | return false; |
348 | } | 338 | } |
349 | return 1; | 339 | return true; |
350 | } | 340 | } |
351 | 341 | ||
352 | #ifdef CONFIG_COMPAT | 342 | #ifdef CONFIG_COMPAT |
@@ -359,7 +349,7 @@ struct compat_ipt_ulog_info { | |||
359 | 349 | ||
360 | static void compat_from_user(void *dst, void *src) | 350 | static void compat_from_user(void *dst, void *src) |
361 | { | 351 | { |
362 | struct compat_ipt_ulog_info *cl = src; | 352 | const struct compat_ipt_ulog_info *cl = src; |
363 | struct ipt_ulog_info l = { | 353 | struct ipt_ulog_info l = { |
364 | .nl_group = cl->nl_group, | 354 | .nl_group = cl->nl_group, |
365 | .copy_range = cl->copy_range, | 355 | .copy_range = cl->copy_range, |
@@ -372,7 +362,7 @@ static void compat_from_user(void *dst, void *src) | |||
372 | 362 | ||
373 | static int compat_to_user(void __user *dst, void *src) | 363 | static int compat_to_user(void __user *dst, void *src) |
374 | { | 364 | { |
375 | struct ipt_ulog_info *l = src; | 365 | const struct ipt_ulog_info *l = src; |
376 | struct compat_ipt_ulog_info cl = { | 366 | struct compat_ipt_ulog_info cl = { |
377 | .nl_group = l->nl_group, | 367 | .nl_group = l->nl_group, |
378 | .copy_range = l->copy_range, | 368 | .copy_range = l->copy_range, |
@@ -384,7 +374,7 @@ static int compat_to_user(void __user *dst, void *src) | |||
384 | } | 374 | } |
385 | #endif /* CONFIG_COMPAT */ | 375 | #endif /* CONFIG_COMPAT */ |
386 | 376 | ||
387 | static struct xt_target ipt_ulog_reg = { | 377 | static struct xt_target ipt_ulog_reg __read_mostly = { |
388 | .name = "ULOG", | 378 | .name = "ULOG", |
389 | .family = AF_INET, | 379 | .family = AF_INET, |
390 | .target = ipt_ulog_target, | 380 | .target = ipt_ulog_target, |
@@ -408,7 +398,7 @@ static int __init ipt_ulog_init(void) | |||
408 | { | 398 | { |
409 | int ret, i; | 399 | int ret, i; |
410 | 400 | ||
411 | DEBUGP("ipt_ULOG: init module\n"); | 401 | pr_debug("ipt_ULOG: init module\n"); |
412 | 402 | ||
413 | if (nlbufsiz > 128*1024) { | 403 | if (nlbufsiz > 128*1024) { |
414 | printk("Netlink buffer has to be <= 128kB\n"); | 404 | printk("Netlink buffer has to be <= 128kB\n"); |
@@ -440,7 +430,7 @@ static void __exit ipt_ulog_fini(void) | |||
440 | ulog_buff_t *ub; | 430 | ulog_buff_t *ub; |
441 | int i; | 431 | int i; |
442 | 432 | ||
443 | DEBUGP("ipt_ULOG: cleanup_module\n"); | 433 | pr_debug("ipt_ULOG: cleanup_module\n"); |
444 | 434 | ||
445 | if (nflog) | 435 | if (nflog) |
446 | nf_log_unregister(&ipt_ulog_logger); | 436 | nf_log_unregister(&ipt_ulog_logger); |
@@ -451,7 +441,7 @@ static void __exit ipt_ulog_fini(void) | |||
451 | for (i = 0; i < ULOG_MAXNLGROUPS; i++) { | 441 | for (i = 0; i < ULOG_MAXNLGROUPS; i++) { |
452 | ub = &ulog_buffers[i]; | 442 | ub = &ulog_buffers[i]; |
453 | if (timer_pending(&ub->timer)) { | 443 | if (timer_pending(&ub->timer)) { |
454 | DEBUGP("timer was pending, deleting\n"); | 444 | pr_debug("timer was pending, deleting\n"); |
455 | del_timer(&ub->timer); | 445 | del_timer(&ub->timer); |
456 | } | 446 | } |
457 | 447 | ||
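The ipt_ULOG hunks drop the file-local "#if 0 … DEBUGP" block in favour of pr_debug(), which compiles away unless debugging is enabled, so the messages stay in the source without a hand-rolled on/off switch. A rough user-space analogue of that pattern (the dbg() macro is purely illustrative):

    #include <stdio.h>

    /* Analogue of the DEBUGP -> pr_debug() change: the message compiles to
     * nothing unless DEBUG is defined, so no per-file "#if 0" block is needed. */
    #ifdef DEBUG
    #define dbg(fmt, ...) fprintf(stderr, "debug: " fmt, ##__VA_ARGS__)
    #else
    #define dbg(fmt, ...) do { } while (0)
    #endif

    int main(void)
    {
        dbg("qlen %d, qthreshold %zu\n", 3, (size_t)10);  /* silent unless built with -DDEBUG */
        puts("done");
        return 0;
    }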
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c index a652a1451552..59f01f7ba6b4 100644 --- a/net/ipv4/netfilter/ipt_addrtype.c +++ b/net/ipv4/netfilter/ipt_addrtype.c | |||
@@ -22,19 +22,19 @@ MODULE_LICENSE("GPL"); | |||
22 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | 22 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); |
23 | MODULE_DESCRIPTION("iptables addrtype match"); | 23 | MODULE_DESCRIPTION("iptables addrtype match"); |
24 | 24 | ||
25 | static inline int match_type(__be32 addr, u_int16_t mask) | 25 | static inline bool match_type(__be32 addr, u_int16_t mask) |
26 | { | 26 | { |
27 | return !!(mask & (1 << inet_addr_type(addr))); | 27 | return !!(mask & (1 << inet_addr_type(addr))); |
28 | } | 28 | } |
29 | 29 | ||
30 | static int match(const struct sk_buff *skb, | 30 | static bool match(const struct sk_buff *skb, |
31 | const struct net_device *in, const struct net_device *out, | 31 | const struct net_device *in, const struct net_device *out, |
32 | const struct xt_match *match, const void *matchinfo, | 32 | const struct xt_match *match, const void *matchinfo, |
33 | int offset, unsigned int protoff, int *hotdrop) | 33 | int offset, unsigned int protoff, bool *hotdrop) |
34 | { | 34 | { |
35 | const struct ipt_addrtype_info *info = matchinfo; | 35 | const struct ipt_addrtype_info *info = matchinfo; |
36 | const struct iphdr *iph = ip_hdr(skb); | 36 | const struct iphdr *iph = ip_hdr(skb); |
37 | int ret = 1; | 37 | bool ret = true; |
38 | 38 | ||
39 | if (info->source) | 39 | if (info->source) |
40 | ret &= match_type(iph->saddr, info->source)^info->invert_source; | 40 | ret &= match_type(iph->saddr, info->source)^info->invert_source; |
@@ -44,7 +44,7 @@ static int match(const struct sk_buff *skb, | |||
44 | return ret; | 44 | return ret; |
45 | } | 45 | } |
46 | 46 | ||
47 | static struct xt_match addrtype_match = { | 47 | static struct xt_match addrtype_match __read_mostly = { |
48 | .name = "addrtype", | 48 | .name = "addrtype", |
49 | .family = AF_INET, | 49 | .family = AF_INET, |
50 | .match = match, | 50 | .match = match, |
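match_type() in ipt_addrtype asks whether the routing classification of an address has its bit set in the configured mask; the bool conversion only changes the declared return type, the bit test is untouched. A standalone sketch of the idiom with a stub classifier standing in for the kernel's inet_addr_type() (the RTN_* values here are illustrative stand-ins):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for a few RTN_* route types. */
    enum { RTN_UNICAST = 1, RTN_LOCAL = 2, RTN_BROADCAST = 3 };

    /* Stub classifier in place of the kernel's inet_addr_type(). */
    static int addr_type(uint32_t addr_host_order)
    {
        return addr_host_order == 0x7f000001u ? RTN_LOCAL : RTN_UNICAST;
    }

    /* Same idea as match_type(): is the address type's bit set in the mask? */
    static bool match_type(uint32_t addr, uint16_t mask)
    {
        return mask & (1 << addr_type(addr));
    }

    int main(void)
    {
        uint16_t local_only = 1 << RTN_LOCAL;
        printf("%d %d\n", match_type(0x7f000001u, local_only),   /* 127.0.0.1 -> 1 */
               match_type(0x0a000001u, local_only));             /* 10.0.0.1  -> 0 */
        return 0;
    }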
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c index 18a16782cf40..61b017fd743c 100644 --- a/net/ipv4/netfilter/ipt_ah.c +++ b/net/ipv4/netfilter/ipt_ah.c | |||
@@ -25,10 +25,10 @@ MODULE_DESCRIPTION("iptables AH SPI match module"); | |||
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ | 27 | /* Returns 1 if the spi is matched by the range, 0 otherwise */ |
28 | static inline int | 28 | static inline bool |
29 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) | 29 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert) |
30 | { | 30 | { |
31 | int r=0; | 31 | bool r; |
32 | duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', | 32 | duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', |
33 | min,spi,max); | 33 | min,spi,max); |
34 | r=(spi >= min && spi <= max) ^ invert; | 34 | r=(spi >= min && spi <= max) ^ invert; |
@@ -36,7 +36,7 @@ spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) | |||
36 | return r; | 36 | return r; |
37 | } | 37 | } |
38 | 38 | ||
39 | static int | 39 | static bool |
40 | match(const struct sk_buff *skb, | 40 | match(const struct sk_buff *skb, |
41 | const struct net_device *in, | 41 | const struct net_device *in, |
42 | const struct net_device *out, | 42 | const struct net_device *out, |
@@ -44,14 +44,15 @@ match(const struct sk_buff *skb, | |||
44 | const void *matchinfo, | 44 | const void *matchinfo, |
45 | int offset, | 45 | int offset, |
46 | unsigned int protoff, | 46 | unsigned int protoff, |
47 | int *hotdrop) | 47 | bool *hotdrop) |
48 | { | 48 | { |
49 | struct ip_auth_hdr _ahdr, *ah; | 49 | struct ip_auth_hdr _ahdr; |
50 | const struct ip_auth_hdr *ah; | ||
50 | const struct ipt_ah *ahinfo = matchinfo; | 51 | const struct ipt_ah *ahinfo = matchinfo; |
51 | 52 | ||
52 | /* Must not be a fragment. */ | 53 | /* Must not be a fragment. */ |
53 | if (offset) | 54 | if (offset) |
54 | return 0; | 55 | return false; |
55 | 56 | ||
56 | ah = skb_header_pointer(skb, protoff, | 57 | ah = skb_header_pointer(skb, protoff, |
57 | sizeof(_ahdr), &_ahdr); | 58 | sizeof(_ahdr), &_ahdr); |
@@ -60,7 +61,7 @@ match(const struct sk_buff *skb, | |||
60 | * can't. Hence, no choice but to drop. | 61 | * can't. Hence, no choice but to drop. |
61 | */ | 62 | */ |
62 | duprintf("Dropping evil AH tinygram.\n"); | 63 | duprintf("Dropping evil AH tinygram.\n"); |
63 | *hotdrop = 1; | 64 | *hotdrop = true; |
64 | return 0; | 65 | return 0; |
65 | } | 66 | } |
66 | 67 | ||
@@ -70,7 +71,7 @@ match(const struct sk_buff *skb, | |||
70 | } | 71 | } |
71 | 72 | ||
72 | /* Called when user tries to insert an entry of this type. */ | 73 | /* Called when user tries to insert an entry of this type. */ |
73 | static int | 74 | static bool |
74 | checkentry(const char *tablename, | 75 | checkentry(const char *tablename, |
75 | const void *ip_void, | 76 | const void *ip_void, |
76 | const struct xt_match *match, | 77 | const struct xt_match *match, |
@@ -82,12 +83,12 @@ checkentry(const char *tablename, | |||
82 | /* Must specify no unknown invflags */ | 83 | /* Must specify no unknown invflags */ |
83 | if (ahinfo->invflags & ~IPT_AH_INV_MASK) { | 84 | if (ahinfo->invflags & ~IPT_AH_INV_MASK) { |
84 | duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags); | 85 | duprintf("ipt_ah: unknown flags %X\n", ahinfo->invflags); |
85 | return 0; | 86 | return false; |
86 | } | 87 | } |
87 | return 1; | 88 | return true; |
88 | } | 89 | } |
89 | 90 | ||
90 | static struct xt_match ah_match = { | 91 | static struct xt_match ah_match __read_mostly = { |
91 | .name = "ah", | 92 | .name = "ah", |
92 | .family = AF_INET, | 93 | .family = AF_INET, |
93 | .match = match, | 94 | .match = match, |
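spi_match() in ipt_ah is the usual netfilter range-plus-invert idiom: "SPI within [min, max]" XORed with the rule's invert flag, which is why the converted version can drop the dummy initialiser and return bool directly. A compilable illustration of the idiom:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The range-plus-invert idiom from ipt_ah: in-range XOR invert. */
    static bool spi_match(uint32_t min, uint32_t max, uint32_t spi, bool invert)
    {
        return (spi >= min && spi <= max) ^ invert;
    }

    int main(void)
    {
        printf("%d\n", spi_match(100, 200, 150, false)); /* 1: in range            */
        printf("%d\n", spi_match(100, 200, 150, true));  /* 0: in range, inverted  */
        printf("%d\n", spi_match(100, 200, 250, true));  /* 1: outside, inverted   */
        return 0;
    }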
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c index 26218122f865..d6925c674069 100644 --- a/net/ipv4/netfilter/ipt_ecn.c +++ b/net/ipv4/netfilter/ipt_ecn.c | |||
@@ -22,95 +22,96 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |||
22 | MODULE_DESCRIPTION("iptables ECN matching module"); | 22 | MODULE_DESCRIPTION("iptables ECN matching module"); |
23 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
24 | 24 | ||
25 | static inline int match_ip(const struct sk_buff *skb, | 25 | static inline bool match_ip(const struct sk_buff *skb, |
26 | const struct ipt_ecn_info *einfo) | 26 | const struct ipt_ecn_info *einfo) |
27 | { | 27 | { |
28 | return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect; | 28 | return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect; |
29 | } | 29 | } |
30 | 30 | ||
31 | static inline int match_tcp(const struct sk_buff *skb, | 31 | static inline bool match_tcp(const struct sk_buff *skb, |
32 | const struct ipt_ecn_info *einfo, | 32 | const struct ipt_ecn_info *einfo, |
33 | int *hotdrop) | 33 | bool *hotdrop) |
34 | { | 34 | { |
35 | struct tcphdr _tcph, *th; | 35 | struct tcphdr _tcph; |
36 | const struct tcphdr *th; | ||
36 | 37 | ||
37 | /* In practice, TCP match does this, so can't fail. But let's | 38 | /* In practice, TCP match does this, so can't fail. But let's |
38 | * be good citizens. | 39 | * be good citizens. |
39 | */ | 40 | */ |
40 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 41 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); |
41 | if (th == NULL) { | 42 | if (th == NULL) { |
42 | *hotdrop = 0; | 43 | *hotdrop = false; |
43 | return 0; | 44 | return false; |
44 | } | 45 | } |
45 | 46 | ||
46 | if (einfo->operation & IPT_ECN_OP_MATCH_ECE) { | 47 | if (einfo->operation & IPT_ECN_OP_MATCH_ECE) { |
47 | if (einfo->invert & IPT_ECN_OP_MATCH_ECE) { | 48 | if (einfo->invert & IPT_ECN_OP_MATCH_ECE) { |
48 | if (th->ece == 1) | 49 | if (th->ece == 1) |
49 | return 0; | 50 | return false; |
50 | } else { | 51 | } else { |
51 | if (th->ece == 0) | 52 | if (th->ece == 0) |
52 | return 0; | 53 | return false; |
53 | } | 54 | } |
54 | } | 55 | } |
55 | 56 | ||
56 | if (einfo->operation & IPT_ECN_OP_MATCH_CWR) { | 57 | if (einfo->operation & IPT_ECN_OP_MATCH_CWR) { |
57 | if (einfo->invert & IPT_ECN_OP_MATCH_CWR) { | 58 | if (einfo->invert & IPT_ECN_OP_MATCH_CWR) { |
58 | if (th->cwr == 1) | 59 | if (th->cwr == 1) |
59 | return 0; | 60 | return false; |
60 | } else { | 61 | } else { |
61 | if (th->cwr == 0) | 62 | if (th->cwr == 0) |
62 | return 0; | 63 | return false; |
63 | } | 64 | } |
64 | } | 65 | } |
65 | 66 | ||
66 | return 1; | 67 | return true; |
67 | } | 68 | } |
68 | 69 | ||
69 | static int match(const struct sk_buff *skb, | 70 | static bool match(const struct sk_buff *skb, |
70 | const struct net_device *in, const struct net_device *out, | 71 | const struct net_device *in, const struct net_device *out, |
71 | const struct xt_match *match, const void *matchinfo, | 72 | const struct xt_match *match, const void *matchinfo, |
72 | int offset, unsigned int protoff, int *hotdrop) | 73 | int offset, unsigned int protoff, bool *hotdrop) |
73 | { | 74 | { |
74 | const struct ipt_ecn_info *info = matchinfo; | 75 | const struct ipt_ecn_info *info = matchinfo; |
75 | 76 | ||
76 | if (info->operation & IPT_ECN_OP_MATCH_IP) | 77 | if (info->operation & IPT_ECN_OP_MATCH_IP) |
77 | if (!match_ip(skb, info)) | 78 | if (!match_ip(skb, info)) |
78 | return 0; | 79 | return false; |
79 | 80 | ||
80 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { | 81 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { |
81 | if (ip_hdr(skb)->protocol != IPPROTO_TCP) | 82 | if (ip_hdr(skb)->protocol != IPPROTO_TCP) |
82 | return 0; | 83 | return false; |
83 | if (!match_tcp(skb, info, hotdrop)) | 84 | if (!match_tcp(skb, info, hotdrop)) |
84 | return 0; | 85 | return false; |
85 | } | 86 | } |
86 | 87 | ||
87 | return 1; | 88 | return true; |
88 | } | 89 | } |
89 | 90 | ||
90 | static int checkentry(const char *tablename, const void *ip_void, | 91 | static bool checkentry(const char *tablename, const void *ip_void, |
91 | const struct xt_match *match, | 92 | const struct xt_match *match, |
92 | void *matchinfo, unsigned int hook_mask) | 93 | void *matchinfo, unsigned int hook_mask) |
93 | { | 94 | { |
94 | const struct ipt_ecn_info *info = matchinfo; | 95 | const struct ipt_ecn_info *info = matchinfo; |
95 | const struct ipt_ip *ip = ip_void; | 96 | const struct ipt_ip *ip = ip_void; |
96 | 97 | ||
97 | if (info->operation & IPT_ECN_OP_MATCH_MASK) | 98 | if (info->operation & IPT_ECN_OP_MATCH_MASK) |
98 | return 0; | 99 | return false; |
99 | 100 | ||
100 | if (info->invert & IPT_ECN_OP_MATCH_MASK) | 101 | if (info->invert & IPT_ECN_OP_MATCH_MASK) |
101 | return 0; | 102 | return false; |
102 | 103 | ||
103 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) | 104 | if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) |
104 | && ip->proto != IPPROTO_TCP) { | 105 | && ip->proto != IPPROTO_TCP) { |
105 | printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for" | 106 | printk(KERN_WARNING "ipt_ecn: can't match TCP bits in rule for" |
106 | " non-tcp packets\n"); | 107 | " non-tcp packets\n"); |
107 | return 0; | 108 | return false; |
108 | } | 109 | } |
109 | 110 | ||
110 | return 1; | 111 | return true; |
111 | } | 112 | } |
112 | 113 | ||
113 | static struct xt_match ecn_match = { | 114 | static struct xt_match ecn_match __read_mostly = { |
114 | .name = "ecn", | 115 | .name = "ecn", |
115 | .family = AF_INET, | 116 | .family = AF_INET, |
116 | .match = match, | 117 | .match = match, |
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c index 33af9e940887..0106dc955a69 100644 --- a/net/ipv4/netfilter/ipt_iprange.c +++ b/net/ipv4/netfilter/ipt_iprange.c | |||
@@ -17,53 +17,47 @@ MODULE_LICENSE("GPL"); | |||
17 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); | 17 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
18 | MODULE_DESCRIPTION("iptables arbitrary IP range match module"); | 18 | MODULE_DESCRIPTION("iptables arbitrary IP range match module"); |
19 | 19 | ||
20 | #if 0 | 20 | static bool |
21 | #define DEBUGP printk | ||
22 | #else | ||
23 | #define DEBUGP(format, args...) | ||
24 | #endif | ||
25 | |||
26 | static int | ||
27 | match(const struct sk_buff *skb, | 21 | match(const struct sk_buff *skb, |
28 | const struct net_device *in, | 22 | const struct net_device *in, |
29 | const struct net_device *out, | 23 | const struct net_device *out, |
30 | const struct xt_match *match, | 24 | const struct xt_match *match, |
31 | const void *matchinfo, | 25 | const void *matchinfo, |
32 | int offset, unsigned int protoff, int *hotdrop) | 26 | int offset, unsigned int protoff, bool *hotdrop) |
33 | { | 27 | { |
34 | const struct ipt_iprange_info *info = matchinfo; | 28 | const struct ipt_iprange_info *info = matchinfo; |
35 | const struct iphdr *iph = ip_hdr(skb); | 29 | const struct iphdr *iph = ip_hdr(skb); |
36 | 30 | ||
37 | if (info->flags & IPRANGE_SRC) { | 31 | if (info->flags & IPRANGE_SRC) { |
38 | if (((ntohl(iph->saddr) < ntohl(info->src.min_ip)) | 32 | if ((ntohl(iph->saddr) < ntohl(info->src.min_ip) |
39 | || (ntohl(iph->saddr) > ntohl(info->src.max_ip))) | 33 | || ntohl(iph->saddr) > ntohl(info->src.max_ip)) |
40 | ^ !!(info->flags & IPRANGE_SRC_INV)) { | 34 | ^ !!(info->flags & IPRANGE_SRC_INV)) { |
41 | DEBUGP("src IP %u.%u.%u.%u NOT in range %s" | 35 | pr_debug("src IP %u.%u.%u.%u NOT in range %s" |
42 | "%u.%u.%u.%u-%u.%u.%u.%u\n", | 36 | "%u.%u.%u.%u-%u.%u.%u.%u\n", |
43 | NIPQUAD(iph->saddr), | 37 | NIPQUAD(iph->saddr), |
44 | info->flags & IPRANGE_SRC_INV ? "(INV) " : "", | 38 | info->flags & IPRANGE_SRC_INV ? "(INV) " : "", |
45 | NIPQUAD(info->src.min_ip), | 39 | NIPQUAD(info->src.min_ip), |
46 | NIPQUAD(info->src.max_ip)); | 40 | NIPQUAD(info->src.max_ip)); |
47 | return 0; | 41 | return false; |
48 | } | 42 | } |
49 | } | 43 | } |
50 | if (info->flags & IPRANGE_DST) { | 44 | if (info->flags & IPRANGE_DST) { |
51 | if (((ntohl(iph->daddr) < ntohl(info->dst.min_ip)) | 45 | if ((ntohl(iph->daddr) < ntohl(info->dst.min_ip) |
52 | || (ntohl(iph->daddr) > ntohl(info->dst.max_ip))) | 46 | || ntohl(iph->daddr) > ntohl(info->dst.max_ip)) |
53 | ^ !!(info->flags & IPRANGE_DST_INV)) { | 47 | ^ !!(info->flags & IPRANGE_DST_INV)) { |
54 | DEBUGP("dst IP %u.%u.%u.%u NOT in range %s" | 48 | pr_debug("dst IP %u.%u.%u.%u NOT in range %s" |
55 | "%u.%u.%u.%u-%u.%u.%u.%u\n", | 49 | "%u.%u.%u.%u-%u.%u.%u.%u\n", |
56 | NIPQUAD(iph->daddr), | 50 | NIPQUAD(iph->daddr), |
57 | info->flags & IPRANGE_DST_INV ? "(INV) " : "", | 51 | info->flags & IPRANGE_DST_INV ? "(INV) " : "", |
58 | NIPQUAD(info->dst.min_ip), | 52 | NIPQUAD(info->dst.min_ip), |
59 | NIPQUAD(info->dst.max_ip)); | 53 | NIPQUAD(info->dst.max_ip)); |
60 | return 0; | 54 | return false; |
61 | } | 55 | } |
62 | } | 56 | } |
63 | return 1; | 57 | return true; |
64 | } | 58 | } |
65 | 59 | ||
66 | static struct xt_match iprange_match = { | 60 | static struct xt_match iprange_match __read_mostly = { |
67 | .name = "iprange", | 61 | .name = "iprange", |
68 | .family = AF_INET, | 62 | .family = AF_INET, |
69 | .match = match, | 63 | .match = match, |
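The iprange conversion keeps the comparison logic as-is and only drops the local DEBUGP and some redundant parentheses: the network-order addresses go through ntohl() before the numeric range check, and the result is XORed with the inversion flag. A standalone sketch of that check:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Network-order addresses must be converted with ntohl() before numeric
     * comparison, as in the iprange match; "outside XOR invert" means no match. */
    static bool in_range(uint32_t addr_be, uint32_t min_be, uint32_t max_be,
                         bool invert)
    {
        bool outside = ntohl(addr_be) < ntohl(min_be) ||
                       ntohl(addr_be) > ntohl(max_be);
        return !(outside ^ invert);
    }

    int main(void)
    {
        uint32_t lo = htonl(0x0a000001);  /* 10.0.0.1   */
        uint32_t hi = htonl(0x0a0000ff);  /* 10.0.0.255 */
        uint32_t a  = htonl(0x0a000080);  /* 10.0.0.128 */

        printf("%d %d\n", in_range(a, lo, hi, false),  /* 1: inside the range */
               in_range(a, lo, hi, true));             /* 0: range inverted   */
        return 0;
    }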
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c index 7fae9aa8944c..b14e77da7a33 100644 --- a/net/ipv4/netfilter/ipt_owner.c +++ b/net/ipv4/netfilter/ipt_owner.c | |||
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL"); | |||
21 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); | 21 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); |
22 | MODULE_DESCRIPTION("iptables owner match"); | 22 | MODULE_DESCRIPTION("iptables owner match"); |
23 | 23 | ||
24 | static int | 24 | static bool |
25 | match(const struct sk_buff *skb, | 25 | match(const struct sk_buff *skb, |
26 | const struct net_device *in, | 26 | const struct net_device *in, |
27 | const struct net_device *out, | 27 | const struct net_device *out, |
@@ -29,29 +29,29 @@ match(const struct sk_buff *skb, | |||
29 | const void *matchinfo, | 29 | const void *matchinfo, |
30 | int offset, | 30 | int offset, |
31 | unsigned int protoff, | 31 | unsigned int protoff, |
32 | int *hotdrop) | 32 | bool *hotdrop) |
33 | { | 33 | { |
34 | const struct ipt_owner_info *info = matchinfo; | 34 | const struct ipt_owner_info *info = matchinfo; |
35 | 35 | ||
36 | if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) | 36 | if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) |
37 | return 0; | 37 | return false; |
38 | 38 | ||
39 | if(info->match & IPT_OWNER_UID) { | 39 | if(info->match & IPT_OWNER_UID) { |
40 | if ((skb->sk->sk_socket->file->f_uid != info->uid) ^ | 40 | if ((skb->sk->sk_socket->file->f_uid != info->uid) ^ |
41 | !!(info->invert & IPT_OWNER_UID)) | 41 | !!(info->invert & IPT_OWNER_UID)) |
42 | return 0; | 42 | return false; |
43 | } | 43 | } |
44 | 44 | ||
45 | if(info->match & IPT_OWNER_GID) { | 45 | if(info->match & IPT_OWNER_GID) { |
46 | if ((skb->sk->sk_socket->file->f_gid != info->gid) ^ | 46 | if ((skb->sk->sk_socket->file->f_gid != info->gid) ^ |
47 | !!(info->invert & IPT_OWNER_GID)) | 47 | !!(info->invert & IPT_OWNER_GID)) |
48 | return 0; | 48 | return false; |
49 | } | 49 | } |
50 | 50 | ||
51 | return 1; | 51 | return true; |
52 | } | 52 | } |
53 | 53 | ||
54 | static int | 54 | static bool |
55 | checkentry(const char *tablename, | 55 | checkentry(const char *tablename, |
56 | const void *ip, | 56 | const void *ip, |
57 | const struct xt_match *match, | 57 | const struct xt_match *match, |
@@ -63,12 +63,12 @@ checkentry(const char *tablename, | |||
63 | if (info->match & (IPT_OWNER_PID|IPT_OWNER_SID|IPT_OWNER_COMM)) { | 63 | if (info->match & (IPT_OWNER_PID|IPT_OWNER_SID|IPT_OWNER_COMM)) { |
64 | printk("ipt_owner: pid, sid and command matching " | 64 | printk("ipt_owner: pid, sid and command matching " |
65 | "not supported anymore\n"); | 65 | "not supported anymore\n"); |
66 | return 0; | 66 | return false; |
67 | } | 67 | } |
68 | return 1; | 68 | return true; |
69 | } | 69 | } |
70 | 70 | ||
71 | static struct xt_match owner_match = { | 71 | static struct xt_match owner_match __read_mostly = { |
72 | .name = "owner", | 72 | .name = "owner", |
73 | .family = AF_INET, | 73 | .family = AF_INET, |
74 | .match = match, | 74 | .match = match, |
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c index 15a9e8bbb7cc..321804315659 100644 --- a/net/ipv4/netfilter/ipt_recent.c +++ b/net/ipv4/netfilter/ipt_recent.c | |||
@@ -163,24 +163,23 @@ static void recent_table_flush(struct recent_table *t) | |||
163 | struct recent_entry *e, *next; | 163 | struct recent_entry *e, *next; |
164 | unsigned int i; | 164 | unsigned int i; |
165 | 165 | ||
166 | for (i = 0; i < ip_list_hash_size; i++) { | 166 | for (i = 0; i < ip_list_hash_size; i++) |
167 | list_for_each_entry_safe(e, next, &t->iphash[i], list) | 167 | list_for_each_entry_safe(e, next, &t->iphash[i], list) |
168 | recent_entry_remove(t, e); | 168 | recent_entry_remove(t, e); |
169 | } | ||
170 | } | 169 | } |
171 | 170 | ||
172 | static int | 171 | static bool |
173 | ipt_recent_match(const struct sk_buff *skb, | 172 | ipt_recent_match(const struct sk_buff *skb, |
174 | const struct net_device *in, const struct net_device *out, | 173 | const struct net_device *in, const struct net_device *out, |
175 | const struct xt_match *match, const void *matchinfo, | 174 | const struct xt_match *match, const void *matchinfo, |
176 | int offset, unsigned int protoff, int *hotdrop) | 175 | int offset, unsigned int protoff, bool *hotdrop) |
177 | { | 176 | { |
178 | const struct ipt_recent_info *info = matchinfo; | 177 | const struct ipt_recent_info *info = matchinfo; |
179 | struct recent_table *t; | 178 | struct recent_table *t; |
180 | struct recent_entry *e; | 179 | struct recent_entry *e; |
181 | __be32 addr; | 180 | __be32 addr; |
182 | u_int8_t ttl; | 181 | u_int8_t ttl; |
183 | int ret = info->invert; | 182 | bool ret = info->invert; |
184 | 183 | ||
185 | if (info->side == IPT_RECENT_DEST) | 184 | if (info->side == IPT_RECENT_DEST) |
186 | addr = ip_hdr(skb)->daddr; | 185 | addr = ip_hdr(skb)->daddr; |
@@ -201,16 +200,16 @@ ipt_recent_match(const struct sk_buff *skb, | |||
201 | goto out; | 200 | goto out; |
202 | e = recent_entry_init(t, addr, ttl); | 201 | e = recent_entry_init(t, addr, ttl); |
203 | if (e == NULL) | 202 | if (e == NULL) |
204 | *hotdrop = 1; | 203 | *hotdrop = true; |
205 | ret ^= 1; | 204 | ret = !ret; |
206 | goto out; | 205 | goto out; |
207 | } | 206 | } |
208 | 207 | ||
209 | if (info->check_set & IPT_RECENT_SET) | 208 | if (info->check_set & IPT_RECENT_SET) |
210 | ret ^= 1; | 209 | ret = !ret; |
211 | else if (info->check_set & IPT_RECENT_REMOVE) { | 210 | else if (info->check_set & IPT_RECENT_REMOVE) { |
212 | recent_entry_remove(t, e); | 211 | recent_entry_remove(t, e); |
213 | ret ^= 1; | 212 | ret = !ret; |
214 | } else if (info->check_set & (IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) { | 213 | } else if (info->check_set & (IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) { |
215 | unsigned long t = jiffies - info->seconds * HZ; | 214 | unsigned long t = jiffies - info->seconds * HZ; |
216 | unsigned int i, hits = 0; | 215 | unsigned int i, hits = 0; |
@@ -219,7 +218,7 @@ ipt_recent_match(const struct sk_buff *skb, | |||
219 | if (info->seconds && time_after(t, e->stamps[i])) | 218 | if (info->seconds && time_after(t, e->stamps[i])) |
220 | continue; | 219 | continue; |
221 | if (++hits >= info->hit_count) { | 220 | if (++hits >= info->hit_count) { |
222 | ret ^= 1; | 221 | ret = !ret; |
223 | break; | 222 | break; |
224 | } | 223 | } |
225 | } | 224 | } |
@@ -235,7 +234,7 @@ out: | |||
235 | return ret; | 234 | return ret; |
236 | } | 235 | } |
237 | 236 | ||
238 | static int | 237 | static bool |
239 | ipt_recent_checkentry(const char *tablename, const void *ip, | 238 | ipt_recent_checkentry(const char *tablename, const void *ip, |
240 | const struct xt_match *match, void *matchinfo, | 239 | const struct xt_match *match, void *matchinfo, |
241 | unsigned int hook_mask) | 240 | unsigned int hook_mask) |
@@ -243,24 +242,24 @@ ipt_recent_checkentry(const char *tablename, const void *ip, | |||
243 | const struct ipt_recent_info *info = matchinfo; | 242 | const struct ipt_recent_info *info = matchinfo; |
244 | struct recent_table *t; | 243 | struct recent_table *t; |
245 | unsigned i; | 244 | unsigned i; |
246 | int ret = 0; | 245 | bool ret = false; |
247 | 246 | ||
248 | if (hweight8(info->check_set & | 247 | if (hweight8(info->check_set & |
249 | (IPT_RECENT_SET | IPT_RECENT_REMOVE | | 248 | (IPT_RECENT_SET | IPT_RECENT_REMOVE | |
250 | IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) != 1) | 249 | IPT_RECENT_CHECK | IPT_RECENT_UPDATE)) != 1) |
251 | return 0; | 250 | return false; |
252 | if ((info->check_set & (IPT_RECENT_SET | IPT_RECENT_REMOVE)) && | 251 | if ((info->check_set & (IPT_RECENT_SET | IPT_RECENT_REMOVE)) && |
253 | (info->seconds || info->hit_count)) | 252 | (info->seconds || info->hit_count)) |
254 | return 0; | 253 | return false; |
255 | if (info->name[0] == '\0' || | 254 | if (info->name[0] == '\0' || |
256 | strnlen(info->name, IPT_RECENT_NAME_LEN) == IPT_RECENT_NAME_LEN) | 255 | strnlen(info->name, IPT_RECENT_NAME_LEN) == IPT_RECENT_NAME_LEN) |
257 | return 0; | 256 | return false; |
258 | 257 | ||
259 | mutex_lock(&recent_mutex); | 258 | mutex_lock(&recent_mutex); |
260 | t = recent_table_lookup(info->name); | 259 | t = recent_table_lookup(info->name); |
261 | if (t != NULL) { | 260 | if (t != NULL) { |
262 | t->refcnt++; | 261 | t->refcnt++; |
263 | ret = 1; | 262 | ret = true; |
264 | goto out; | 263 | goto out; |
265 | } | 264 | } |
266 | 265 | ||
@@ -287,7 +286,7 @@ ipt_recent_checkentry(const char *tablename, const void *ip, | |||
287 | spin_lock_bh(&recent_lock); | 286 | spin_lock_bh(&recent_lock); |
288 | list_add_tail(&t->list, &tables); | 287 | list_add_tail(&t->list, &tables); |
289 | spin_unlock_bh(&recent_lock); | 288 | spin_unlock_bh(&recent_lock); |
290 | ret = 1; | 289 | ret = true; |
291 | out: | 290 | out: |
292 | mutex_unlock(&recent_mutex); | 291 | mutex_unlock(&recent_mutex); |
293 | return ret; | 292 | return ret; |
@@ -323,18 +322,16 @@ struct recent_iter_state { | |||
323 | static void *recent_seq_start(struct seq_file *seq, loff_t *pos) | 322 | static void *recent_seq_start(struct seq_file *seq, loff_t *pos) |
324 | { | 323 | { |
325 | struct recent_iter_state *st = seq->private; | 324 | struct recent_iter_state *st = seq->private; |
326 | struct recent_table *t = st->table; | 325 | const struct recent_table *t = st->table; |
327 | struct recent_entry *e; | 326 | struct recent_entry *e; |
328 | loff_t p = *pos; | 327 | loff_t p = *pos; |
329 | 328 | ||
330 | spin_lock_bh(&recent_lock); | 329 | spin_lock_bh(&recent_lock); |
331 | 330 | ||
332 | for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) { | 331 | for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) |
333 | list_for_each_entry(e, &t->iphash[st->bucket], list) { | 332 | list_for_each_entry(e, &t->iphash[st->bucket], list) |
334 | if (p-- == 0) | 333 | if (p-- == 0) |
335 | return e; | 334 | return e; |
336 | } | ||
337 | } | ||
338 | return NULL; | 335 | return NULL; |
339 | } | 336 | } |
340 | 337 | ||
@@ -373,7 +370,7 @@ static int recent_seq_show(struct seq_file *seq, void *v) | |||
373 | return 0; | 370 | return 0; |
374 | } | 371 | } |
375 | 372 | ||
376 | static struct seq_operations recent_seq_ops = { | 373 | static const struct seq_operations recent_seq_ops = { |
377 | .start = recent_seq_start, | 374 | .start = recent_seq_start, |
378 | .next = recent_seq_next, | 375 | .next = recent_seq_next, |
379 | .stop = recent_seq_stop, | 376 | .stop = recent_seq_stop, |
@@ -463,7 +460,7 @@ static const struct file_operations recent_fops = { | |||
463 | }; | 460 | }; |
464 | #endif /* CONFIG_PROC_FS */ | 461 | #endif /* CONFIG_PROC_FS */ |
465 | 462 | ||
466 | static struct xt_match recent_match = { | 463 | static struct xt_match recent_match __read_mostly = { |
467 | .name = "recent", | 464 | .name = "recent", |
468 | .family = AF_INET, | 465 | .family = AF_INET, |
469 | .match = ipt_recent_match, | 466 | .match = ipt_recent_match, |
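With ret now a bool, the old "ret ^= 1" toggles in ipt_recent_match become "ret = !ret"; the starting value is the rule's invert flag, so each positive outcome flips the final answer. A trivial demonstration of that toggle:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* Start from the rule's invert flag and flip once per positive outcome,
         * as ipt_recent_match now does with "ret = !ret" instead of "ret ^= 1". */
        bool invert = true;
        bool ret = invert;

        ret = !ret;                 /* e.g. the address was found in the table */
        printf("%d\n", ret);        /* 0: the inverted rule does not match */
        return 0;
    }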
diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c index d314844af12b..e740441c973d 100644 --- a/net/ipv4/netfilter/ipt_tos.c +++ b/net/ipv4/netfilter/ipt_tos.c | |||
@@ -18,7 +18,7 @@ | |||
18 | MODULE_LICENSE("GPL"); | 18 | MODULE_LICENSE("GPL"); |
19 | MODULE_DESCRIPTION("iptables TOS match module"); | 19 | MODULE_DESCRIPTION("iptables TOS match module"); |
20 | 20 | ||
21 | static int | 21 | static bool |
22 | match(const struct sk_buff *skb, | 22 | match(const struct sk_buff *skb, |
23 | const struct net_device *in, | 23 | const struct net_device *in, |
24 | const struct net_device *out, | 24 | const struct net_device *out, |
@@ -26,14 +26,14 @@ match(const struct sk_buff *skb, | |||
26 | const void *matchinfo, | 26 | const void *matchinfo, |
27 | int offset, | 27 | int offset, |
28 | unsigned int protoff, | 28 | unsigned int protoff, |
29 | int *hotdrop) | 29 | bool *hotdrop) |
30 | { | 30 | { |
31 | const struct ipt_tos_info *info = matchinfo; | 31 | const struct ipt_tos_info *info = matchinfo; |
32 | 32 | ||
33 | return (ip_hdr(skb)->tos == info->tos) ^ info->invert; | 33 | return (ip_hdr(skb)->tos == info->tos) ^ info->invert; |
34 | } | 34 | } |
35 | 35 | ||
36 | static struct xt_match tos_match = { | 36 | static struct xt_match tos_match __read_mostly = { |
37 | .name = "tos", | 37 | .name = "tos", |
38 | .family = AF_INET, | 38 | .family = AF_INET, |
39 | .match = match, | 39 | .match = match, |
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c index ab02d9e3139c..a439900a4ba5 100644 --- a/net/ipv4/netfilter/ipt_ttl.c +++ b/net/ipv4/netfilter/ipt_ttl.c | |||
@@ -18,37 +18,33 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |||
18 | MODULE_DESCRIPTION("IP tables TTL matching module"); | 18 | MODULE_DESCRIPTION("IP tables TTL matching module"); |
19 | MODULE_LICENSE("GPL"); | 19 | MODULE_LICENSE("GPL"); |
20 | 20 | ||
21 | static int match(const struct sk_buff *skb, | 21 | static bool match(const struct sk_buff *skb, |
22 | const struct net_device *in, const struct net_device *out, | 22 | const struct net_device *in, const struct net_device *out, |
23 | const struct xt_match *match, const void *matchinfo, | 23 | const struct xt_match *match, const void *matchinfo, |
24 | int offset, unsigned int protoff, int *hotdrop) | 24 | int offset, unsigned int protoff, bool *hotdrop) |
25 | { | 25 | { |
26 | const struct ipt_ttl_info *info = matchinfo; | 26 | const struct ipt_ttl_info *info = matchinfo; |
27 | const u8 ttl = ip_hdr(skb)->ttl; | 27 | const u8 ttl = ip_hdr(skb)->ttl; |
28 | 28 | ||
29 | switch (info->mode) { | 29 | switch (info->mode) { |
30 | case IPT_TTL_EQ: | 30 | case IPT_TTL_EQ: |
31 | return (ttl == info->ttl); | 31 | return ttl == info->ttl; |
32 | break; | ||
33 | case IPT_TTL_NE: | 32 | case IPT_TTL_NE: |
34 | return (!(ttl == info->ttl)); | 33 | return ttl != info->ttl; |
35 | break; | ||
36 | case IPT_TTL_LT: | 34 | case IPT_TTL_LT: |
37 | return (ttl < info->ttl); | 35 | return ttl < info->ttl; |
38 | break; | ||
39 | case IPT_TTL_GT: | 36 | case IPT_TTL_GT: |
40 | return (ttl > info->ttl); | 37 | return ttl > info->ttl; |
41 | break; | ||
42 | default: | 38 | default: |
43 | printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", | 39 | printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", |
44 | info->mode); | 40 | info->mode); |
45 | return 0; | 41 | return false; |
46 | } | 42 | } |
47 | 43 | ||
48 | return 0; | 44 | return false; |
49 | } | 45 | } |
50 | 46 | ||
51 | static struct xt_match ttl_match = { | 47 | static struct xt_match ttl_match __read_mostly = { |
52 | .name = "ttl", | 48 | .name = "ttl", |
53 | .family = AF_INET, | 49 | .family = AF_INET, |
54 | .match = match, | 50 | .match = match, |
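Removing the unreachable break statements after each return (and the extra parentheses) leaves ipt_ttl's match as a plain switch over the comparison mode. A user-space version of the same dispatch, with stand-in mode constants in place of the IPT_TTL_* values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the IPT_TTL_EQ/NE/LT/GT mode constants. */
    enum ttl_mode { TTL_EQ, TTL_NE, TTL_LT, TTL_GT };

    static bool ttl_match(enum ttl_mode mode, unsigned char ttl, unsigned char want)
    {
        switch (mode) {
        case TTL_EQ: return ttl == want;
        case TTL_NE: return ttl != want;
        case TTL_LT: return ttl < want;
        case TTL_GT: return ttl > want;
        default:
            fprintf(stderr, "unknown mode %d\n", mode);
            return false;
        }
    }

    int main(void)
    {
        printf("%d %d\n", ttl_match(TTL_LT, 63, 64), ttl_match(TTL_GT, 63, 64)); /* 1 0 */
        return 0;
    }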
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 6dc72a815f77..3c5629938487 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -24,12 +24,6 @@ | |||
24 | #include <net/netfilter/nf_conntrack_core.h> | 24 | #include <net/netfilter/nf_conntrack_core.h> |
25 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> | 25 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> |
26 | 26 | ||
27 | #if 0 | ||
28 | #define DEBUGP printk | ||
29 | #else | ||
30 | #define DEBUGP(format, args...) | ||
31 | #endif | ||
32 | |||
33 | static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 27 | static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
34 | struct nf_conntrack_tuple *tuple) | 28 | struct nf_conntrack_tuple *tuple) |
35 | { | 29 | { |
@@ -103,17 +97,6 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, | |||
103 | return NF_ACCEPT; | 97 | return NF_ACCEPT; |
104 | } | 98 | } |
105 | 99 | ||
106 | int nf_nat_module_is_loaded = 0; | ||
107 | EXPORT_SYMBOL_GPL(nf_nat_module_is_loaded); | ||
108 | |||
109 | static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple) | ||
110 | { | ||
111 | if (nf_nat_module_is_loaded) | ||
112 | return NF_CT_F_NAT; | ||
113 | |||
114 | return NF_CT_F_BASIC; | ||
115 | } | ||
116 | |||
117 | static unsigned int ipv4_confirm(unsigned int hooknum, | 100 | static unsigned int ipv4_confirm(unsigned int hooknum, |
118 | struct sk_buff **pskb, | 101 | struct sk_buff **pskb, |
119 | const struct net_device *in, | 102 | const struct net_device *in, |
@@ -335,17 +318,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
335 | 318 | ||
336 | /* We only do TCP at the moment: is there a better way? */ | 319 | /* We only do TCP at the moment: is there a better way? */ |
337 | if (strcmp(sk->sk_prot->name, "TCP")) { | 320 | if (strcmp(sk->sk_prot->name, "TCP")) { |
338 | DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n"); | 321 | pr_debug("SO_ORIGINAL_DST: Not a TCP socket\n"); |
339 | return -ENOPROTOOPT; | 322 | return -ENOPROTOOPT; |
340 | } | 323 | } |
341 | 324 | ||
342 | if ((unsigned int) *len < sizeof(struct sockaddr_in)) { | 325 | if ((unsigned int) *len < sizeof(struct sockaddr_in)) { |
343 | DEBUGP("SO_ORIGINAL_DST: len %u not %u\n", | 326 | pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n", |
344 | *len, sizeof(struct sockaddr_in)); | 327 | *len, sizeof(struct sockaddr_in)); |
345 | return -EINVAL; | 328 | return -EINVAL; |
346 | } | 329 | } |
347 | 330 | ||
348 | h = nf_conntrack_find_get(&tuple, NULL); | 331 | h = nf_conntrack_find_get(&tuple); |
349 | if (h) { | 332 | if (h) { |
350 | struct sockaddr_in sin; | 333 | struct sockaddr_in sin; |
351 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); | 334 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); |
@@ -357,17 +340,17 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
357 | .tuple.dst.u3.ip; | 340 | .tuple.dst.u3.ip; |
358 | memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); | 341 | memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); |
359 | 342 | ||
360 | DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", | 343 | pr_debug("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", |
361 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | 344 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); |
362 | nf_ct_put(ct); | 345 | nf_ct_put(ct); |
363 | if (copy_to_user(user, &sin, sizeof(sin)) != 0) | 346 | if (copy_to_user(user, &sin, sizeof(sin)) != 0) |
364 | return -EFAULT; | 347 | return -EFAULT; |
365 | else | 348 | else |
366 | return 0; | 349 | return 0; |
367 | } | 350 | } |
368 | DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n", | 351 | pr_debug("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n", |
369 | NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port), | 352 | NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port), |
370 | NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port)); | 353 | NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port)); |
371 | return -ENOENT; | 354 | return -ENOENT; |
372 | } | 355 | } |
373 | 356 | ||
@@ -425,7 +408,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { | |||
425 | .print_tuple = ipv4_print_tuple, | 408 | .print_tuple = ipv4_print_tuple, |
426 | .print_conntrack = ipv4_print_conntrack, | 409 | .print_conntrack = ipv4_print_conntrack, |
427 | .prepare = ipv4_prepare, | 410 | .prepare = ipv4_prepare, |
428 | .get_features = ipv4_get_features, | ||
429 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 411 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
430 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, | 412 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, |
431 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, | 413 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 89f933e81035..3da9d73d1b52 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
@@ -18,12 +18,6 @@ | |||
18 | #include <net/netfilter/nf_conntrack_l4proto.h> | 18 | #include <net/netfilter/nf_conntrack_l4proto.h> |
19 | #include <net/netfilter/nf_conntrack_expect.h> | 19 | #include <net/netfilter/nf_conntrack_expect.h> |
20 | 20 | ||
21 | #if 0 | ||
22 | #define DEBUGP printk | ||
23 | #else | ||
24 | #define DEBUGP(format, args...) | ||
25 | #endif | ||
26 | |||
27 | #ifdef CONFIG_NF_CT_ACCT | 21 | #ifdef CONFIG_NF_CT_ACCT |
28 | static unsigned int | 22 | static unsigned int |
29 | seq_print_counters(struct seq_file *s, | 23 | seq_print_counters(struct seq_file *s, |
@@ -41,35 +35,36 @@ struct ct_iter_state { | |||
41 | unsigned int bucket; | 35 | unsigned int bucket; |
42 | }; | 36 | }; |
43 | 37 | ||
44 | static struct list_head *ct_get_first(struct seq_file *seq) | 38 | static struct hlist_node *ct_get_first(struct seq_file *seq) |
45 | { | 39 | { |
46 | struct ct_iter_state *st = seq->private; | 40 | struct ct_iter_state *st = seq->private; |
47 | 41 | ||
48 | for (st->bucket = 0; | 42 | for (st->bucket = 0; |
49 | st->bucket < nf_conntrack_htable_size; | 43 | st->bucket < nf_conntrack_htable_size; |
50 | st->bucket++) { | 44 | st->bucket++) { |
51 | if (!list_empty(&nf_conntrack_hash[st->bucket])) | 45 | if (!hlist_empty(&nf_conntrack_hash[st->bucket])) |
52 | return nf_conntrack_hash[st->bucket].next; | 46 | return nf_conntrack_hash[st->bucket].first; |
53 | } | 47 | } |
54 | return NULL; | 48 | return NULL; |
55 | } | 49 | } |
56 | 50 | ||
57 | static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head) | 51 | static struct hlist_node *ct_get_next(struct seq_file *seq, |
52 | struct hlist_node *head) | ||
58 | { | 53 | { |
59 | struct ct_iter_state *st = seq->private; | 54 | struct ct_iter_state *st = seq->private; |
60 | 55 | ||
61 | head = head->next; | 56 | head = head->next; |
62 | while (head == &nf_conntrack_hash[st->bucket]) { | 57 | while (head == NULL) { |
63 | if (++st->bucket >= nf_conntrack_htable_size) | 58 | if (++st->bucket >= nf_conntrack_htable_size) |
64 | return NULL; | 59 | return NULL; |
65 | head = nf_conntrack_hash[st->bucket].next; | 60 | head = nf_conntrack_hash[st->bucket].first; |
66 | } | 61 | } |
67 | return head; | 62 | return head; |
68 | } | 63 | } |
69 | 64 | ||
70 | static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos) | 65 | static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos) |
71 | { | 66 | { |
72 | struct list_head *head = ct_get_first(seq); | 67 | struct hlist_node *head = ct_get_first(seq); |
73 | 68 | ||
74 | if (head) | 69 | if (head) |
75 | while (pos && (head = ct_get_next(seq, head))) | 70 | while (pos && (head = ct_get_next(seq, head))) |
@@ -169,7 +164,7 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
169 | return 0; | 164 | return 0; |
170 | } | 165 | } |
171 | 166 | ||
172 | static struct seq_operations ct_seq_ops = { | 167 | static const struct seq_operations ct_seq_ops = { |
173 | .start = ct_seq_start, | 168 | .start = ct_seq_start, |
174 | .next = ct_seq_next, | 169 | .next = ct_seq_next, |
175 | .stop = ct_seq_stop, | 170 | .stop = ct_seq_stop, |
@@ -206,47 +201,68 @@ static const struct file_operations ct_file_ops = { | |||
206 | }; | 201 | }; |
207 | 202 | ||
208 | /* expects */ | 203 | /* expects */ |
209 | static void *exp_seq_start(struct seq_file *s, loff_t *pos) | 204 | struct ct_expect_iter_state { |
205 | unsigned int bucket; | ||
206 | }; | ||
207 | |||
208 | static struct hlist_node *ct_expect_get_first(struct seq_file *seq) | ||
210 | { | 209 | { |
211 | struct list_head *e = &nf_conntrack_expect_list; | 210 | struct ct_expect_iter_state *st = seq->private; |
212 | loff_t i; | ||
213 | 211 | ||
214 | /* strange seq_file api calls stop even if we fail, | 212 | for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { |
215 | * thus we need to grab lock since stop unlocks */ | 213 | if (!hlist_empty(&nf_ct_expect_hash[st->bucket])) |
216 | read_lock_bh(&nf_conntrack_lock); | 214 | return nf_ct_expect_hash[st->bucket].first; |
215 | } | ||
216 | return NULL; | ||
217 | } | ||
217 | 218 | ||
218 | if (list_empty(e)) | 219 | static struct hlist_node *ct_expect_get_next(struct seq_file *seq, |
219 | return NULL; | 220 | struct hlist_node *head) |
221 | { | ||
222 | struct ct_expect_iter_state *st = seq->private; | ||
220 | 223 | ||
221 | for (i = 0; i <= *pos; i++) { | 224 | head = head->next; |
222 | e = e->next; | 225 | while (head == NULL) { |
223 | if (e == &nf_conntrack_expect_list) | 226 | if (++st->bucket >= nf_ct_expect_hsize) |
224 | return NULL; | 227 | return NULL; |
228 | head = nf_ct_expect_hash[st->bucket].first; | ||
225 | } | 229 | } |
226 | return e; | 230 | return head; |
227 | } | 231 | } |
228 | 232 | ||
229 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) | 233 | static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos) |
230 | { | 234 | { |
231 | struct list_head *e = v; | 235 | struct hlist_node *head = ct_expect_get_first(seq); |
232 | 236 | ||
233 | ++*pos; | 237 | if (head) |
234 | e = e->next; | 238 | while (pos && (head = ct_expect_get_next(seq, head))) |
239 | pos--; | ||
240 | return pos ? NULL : head; | ||
241 | } | ||
235 | 242 | ||
236 | if (e == &nf_conntrack_expect_list) | 243 | static void *exp_seq_start(struct seq_file *seq, loff_t *pos) |
237 | return NULL; | 244 | { |
245 | read_lock_bh(&nf_conntrack_lock); | ||
246 | return ct_expect_get_idx(seq, *pos); | ||
247 | } | ||
238 | 248 | ||
239 | return e; | 249 | static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
250 | { | ||
251 | (*pos)++; | ||
252 | return ct_expect_get_next(seq, v); | ||
240 | } | 253 | } |
241 | 254 | ||
242 | static void exp_seq_stop(struct seq_file *s, void *v) | 255 | static void exp_seq_stop(struct seq_file *seq, void *v) |
243 | { | 256 | { |
244 | read_unlock_bh(&nf_conntrack_lock); | 257 | read_unlock_bh(&nf_conntrack_lock); |
245 | } | 258 | } |
246 | 259 | ||
247 | static int exp_seq_show(struct seq_file *s, void *v) | 260 | static int exp_seq_show(struct seq_file *s, void *v) |
248 | { | 261 | { |
249 | struct nf_conntrack_expect *exp = v; | 262 | struct nf_conntrack_expect *exp; |
263 | struct hlist_node *n = v; | ||
264 | |||
265 | exp = hlist_entry(n, struct nf_conntrack_expect, hnode); | ||
250 | 266 | ||
251 | if (exp->tuple.src.l3num != AF_INET) | 267 | if (exp->tuple.src.l3num != AF_INET) |
252 | return 0; | 268 | return 0; |
@@ -266,7 +282,7 @@ static int exp_seq_show(struct seq_file *s, void *v) | |||
266 | return seq_putc(s, '\n'); | 282 | return seq_putc(s, '\n'); |
267 | } | 283 | } |
268 | 284 | ||
269 | static struct seq_operations exp_seq_ops = { | 285 | static const struct seq_operations exp_seq_ops = { |
270 | .start = exp_seq_start, | 286 | .start = exp_seq_start, |
271 | .next = exp_seq_next, | 287 | .next = exp_seq_next, |
272 | .stop = exp_seq_stop, | 288 | .stop = exp_seq_stop, |
@@ -275,7 +291,23 @@ static struct seq_operations exp_seq_ops = { | |||
275 | 291 | ||
276 | static int exp_open(struct inode *inode, struct file *file) | 292 | static int exp_open(struct inode *inode, struct file *file) |
277 | { | 293 | { |
278 | return seq_open(file, &exp_seq_ops); | 294 | struct seq_file *seq; |
295 | struct ct_expect_iter_state *st; | ||
296 | int ret; | ||
297 | |||
298 | st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); | ||
299 | if (st == NULL) | ||
300 | return -ENOMEM; | ||
301 | ret = seq_open(file, &exp_seq_ops); | ||
302 | if (ret) | ||
303 | goto out_free; | ||
304 | seq = file->private_data; | ||
305 | seq->private = st; | ||
306 | memset(st, 0, sizeof(struct ct_expect_iter_state)); | ||
307 | return ret; | ||
308 | out_free: | ||
309 | kfree(st); | ||
310 | return ret; | ||
279 | } | 311 | } |
280 | 312 | ||
281 | static const struct file_operations ip_exp_file_ops = { | 313 | static const struct file_operations ip_exp_file_ops = { |
@@ -283,7 +315,7 @@ static const struct file_operations ip_exp_file_ops = { | |||
283 | .open = exp_open, | 315 | .open = exp_open, |
284 | .read = seq_read, | 316 | .read = seq_read, |
285 | .llseek = seq_lseek, | 317 | .llseek = seq_lseek, |
286 | .release = seq_release | 318 | .release = seq_release_private, |
287 | }; | 319 | }; |
288 | 320 | ||
289 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) | 321 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -354,7 +386,7 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v) | |||
354 | return 0; | 386 | return 0; |
355 | } | 387 | } |
356 | 388 | ||
357 | static struct seq_operations ct_cpu_seq_ops = { | 389 | static const struct seq_operations ct_cpu_seq_ops = { |
358 | .start = ct_cpu_seq_start, | 390 | .start = ct_cpu_seq_start, |
359 | .next = ct_cpu_seq_next, | 391 | .next = ct_cpu_seq_next, |
360 | .stop = ct_cpu_seq_stop, | 392 | .stop = ct_cpu_seq_stop, |
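The /proc iterators above move from circular list_head chains (where the walk stops on reaching the bucket head again) to NULL-terminated hlist chains, so "next" now follows ->next and skips to the next non-empty bucket whenever it hits NULL. A small user-space model of that bucket walk, using a plain singly linked list rather than the kernel's hlist API:

    #include <stddef.h>
    #include <stdio.h>

    #define NBUCKETS 4

    struct node { int val; struct node *next; };

    static struct node *buckets[NBUCKETS];

    /* First entry: the head of the first non-empty bucket (cf. ct_get_first). */
    static struct node *get_first(unsigned int *bucket)
    {
        for (*bucket = 0; *bucket < NBUCKETS; (*bucket)++)
            if (buckets[*bucket])
                return buckets[*bucket];
        return NULL;
    }

    /* Next entry: follow ->next, and when a NULL-terminated chain ends, move
     * on to the next non-empty bucket (cf. ct_get_next after the conversion). */
    static struct node *get_next(struct node *n, unsigned int *bucket)
    {
        n = n->next;
        while (n == NULL) {
            if (++*bucket >= NBUCKETS)
                return NULL;
            n = buckets[*bucket];
        }
        return n;
    }

    int main(void)
    {
        struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
        unsigned int bucket;

        buckets[0] = &a;
        buckets[2] = &b;
        b.next = &c;

        for (struct node *n = get_first(&bucket); n; n = get_next(n, &bucket))
            printf("%d ", n->val);          /* prints: 1 2 3 */
        putchar('\n');
        return 0;
    }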
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index f4fc657c1983..0fe8fb0466ef 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -21,12 +21,6 @@ | |||
21 | 21 | ||
22 | static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ; | 22 | static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ; |
23 | 23 | ||
24 | #if 0 | ||
25 | #define DEBUGP printk | ||
26 | #else | ||
27 | #define DEBUGP(format, args...) | ||
28 | #endif | ||
29 | |||
30 | static int icmp_pkt_to_tuple(const struct sk_buff *skb, | 24 | static int icmp_pkt_to_tuple(const struct sk_buff *skb, |
31 | unsigned int dataoff, | 25 | unsigned int dataoff, |
32 | struct nf_conntrack_tuple *tuple) | 26 | struct nf_conntrack_tuple *tuple) |
@@ -125,8 +119,8 @@ static int icmp_new(struct nf_conn *conntrack, | |||
125 | if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) | 119 | if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) |
126 | || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) { | 120 | || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) { |
127 | /* Can't create a new ICMP `conn' with this. */ | 121 | /* Can't create a new ICMP `conn' with this. */ |
128 | DEBUGP("icmp: can't create new conn with type %u\n", | 122 | pr_debug("icmp: can't create new conn with type %u\n", |
129 | conntrack->tuplehash[0].tuple.dst.u.icmp.type); | 123 | conntrack->tuplehash[0].tuple.dst.u.icmp.type); |
130 | NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple); | 124 | NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple); |
131 | return 0; | 125 | return 0; |
132 | } | 126 | } |
@@ -159,8 +153,8 @@ icmp_error_message(struct sk_buff *skb, | |||
159 | 153 | ||
160 | /* Ignore ICMP's containing fragments (shouldn't happen) */ | 154 | /* Ignore ICMP's containing fragments (shouldn't happen) */ |
161 | if (inside->ip.frag_off & htons(IP_OFFSET)) { | 155 | if (inside->ip.frag_off & htons(IP_OFFSET)) { |
162 | DEBUGP("icmp_error_message: fragment of proto %u\n", | 156 | pr_debug("icmp_error_message: fragment of proto %u\n", |
163 | inside->ip.protocol); | 157 | inside->ip.protocol); |
164 | return -NF_ACCEPT; | 158 | return -NF_ACCEPT; |
165 | } | 159 | } |
166 | 160 | ||
@@ -172,8 +166,8 @@ icmp_error_message(struct sk_buff *skb, | |||
172 | if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET, | 166 | if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET, |
173 | inside->ip.protocol, &origtuple, | 167 | inside->ip.protocol, &origtuple, |
174 | &nf_conntrack_l3proto_ipv4, innerproto)) { | 168 | &nf_conntrack_l3proto_ipv4, innerproto)) { |
175 | DEBUGP("icmp_error_message: ! get_tuple p=%u", | 169 | pr_debug("icmp_error_message: ! get_tuple p=%u", |
176 | inside->ip.protocol); | 170 | inside->ip.protocol); |
177 | return -NF_ACCEPT; | 171 | return -NF_ACCEPT; |
178 | } | 172 | } |
179 | 173 | ||
@@ -181,22 +175,22 @@ icmp_error_message(struct sk_buff *skb, | |||
181 | been preserved inside the ICMP. */ | 175 | been preserved inside the ICMP. */ |
182 | if (!nf_ct_invert_tuple(&innertuple, &origtuple, | 176 | if (!nf_ct_invert_tuple(&innertuple, &origtuple, |
183 | &nf_conntrack_l3proto_ipv4, innerproto)) { | 177 | &nf_conntrack_l3proto_ipv4, innerproto)) { |
184 | DEBUGP("icmp_error_message: no match\n"); | 178 | pr_debug("icmp_error_message: no match\n"); |
185 | return -NF_ACCEPT; | 179 | return -NF_ACCEPT; |
186 | } | 180 | } |
187 | 181 | ||
188 | *ctinfo = IP_CT_RELATED; | 182 | *ctinfo = IP_CT_RELATED; |
189 | 183 | ||
190 | h = nf_conntrack_find_get(&innertuple, NULL); | 184 | h = nf_conntrack_find_get(&innertuple); |
191 | if (!h) { | 185 | if (!h) { |
192 | /* Locally generated ICMPs will match inverted if they | 186 | /* Locally generated ICMPs will match inverted if they |
193 | haven't been SNAT'ed yet */ | 187 | haven't been SNAT'ed yet */ |
194 | /* FIXME: NAT code has to handle half-done double NAT --RR */ | 188 | /* FIXME: NAT code has to handle half-done double NAT --RR */ |
195 | if (hooknum == NF_IP_LOCAL_OUT) | 189 | if (hooknum == NF_IP_LOCAL_OUT) |
196 | h = nf_conntrack_find_get(&origtuple, NULL); | 190 | h = nf_conntrack_find_get(&origtuple); |
197 | 191 | ||
198 | if (!h) { | 192 | if (!h) { |
199 | DEBUGP("icmp_error_message: no match\n"); | 193 | pr_debug("icmp_error_message: no match\n"); |
200 | return -NF_ACCEPT; | 194 | return -NF_ACCEPT; |
201 | } | 195 | } |
202 | 196 | ||
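
The hunks above are part of a cleanup applied across the netfilter files in this diff: the per-file DEBUGP macros are dropped in favour of pr_debug(), and nf_conntrack_find_get() drops its second argument (always NULL at these call sites). A minimal sketch of why the DEBUGP conversion preserves behaviour, assuming the 2.6.22-era definition of pr_debug() in <linux/kernel.h>; debug_example() is an illustrative stand-in, not code from this patch:

/*
 * Sketch of the conversion applied throughout this series.  The removed
 * per-file idiom was:
 *
 *	#if 0
 *	#define DEBUGP printk
 *	#else
 *	#define DEBUGP(format, args...)
 *	#endif
 *
 * pr_debug() behaves the same way per object file: with DEBUG defined
 * (e.g. ccflags-y += -DDEBUG) it expands to printk(KERN_DEBUG ...),
 * otherwise it compiles to nothing, so the replaced call sites keep
 * their zero-cost default.
 */
#define DEBUG
#include <linux/kernel.h>

static void debug_example(unsigned int icmp_type)
{
	pr_debug("icmp: can't create new conn with type %u\n", icmp_type);
}
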
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c index 0f17098917bc..bd93a1d71052 100644 --- a/net/ipv4/netfilter/nf_nat_amanda.c +++ b/net/ipv4/netfilter/nf_nat_amanda.c | |||
@@ -45,7 +45,7 @@ static unsigned int help(struct sk_buff **pskb, | |||
45 | /* Try to get same port: if not, try to change it. */ | 45 | /* Try to get same port: if not, try to change it. */ |
46 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { | 46 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { |
47 | exp->tuple.dst.u.tcp.port = htons(port); | 47 | exp->tuple.dst.u.tcp.port = htons(port); |
48 | if (nf_conntrack_expect_related(exp) == 0) | 48 | if (nf_ct_expect_related(exp) == 0) |
49 | break; | 49 | break; |
50 | } | 50 | } |
51 | 51 | ||
@@ -57,7 +57,7 @@ static unsigned int help(struct sk_buff **pskb, | |||
57 | matchoff, matchlen, | 57 | matchoff, matchlen, |
58 | buffer, strlen(buffer)); | 58 | buffer, strlen(buffer)); |
59 | if (ret != NF_ACCEPT) | 59 | if (ret != NF_ACCEPT) |
60 | nf_conntrack_unexpect_related(exp); | 60 | nf_ct_unexpect_related(exp); |
61 | return ret; | 61 | return ret; |
62 | } | 62 | } |
63 | 63 | ||
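
The amanda helper above (like the ftp, irc, h323 and sip helpers later in this diff) picks a NAT'd port with the same loop: start from the port the peer asked for and keep incrementing until nf_ct_expect_related() accepts a candidate. Since port is a 16-bit value in these helpers, incrementing past 65535 wraps to 0 and the `port != 0` test ends the search, after which the caller drops the packet. A self-contained userspace sketch of that termination behaviour; expect_related_stub() and the sample port numbers are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for nf_ct_expect_related(): pretend only port 40000 is free. */
static int expect_related_stub(uint16_t candidate)
{
	return candidate == 40000 ? 0 : -1;
}

/* Mirrors the helper loop: try ports upward from `start`, give up on wrap. */
static uint16_t pick_port(uint16_t start)
{
	uint16_t port;

	for (port = start; port != 0; port++)
		if (expect_related_stub(port) == 0)
			break;
	return port;		/* 0 means "no port available" */
}

int main(void)
{
	printf("start 39998 -> %u\n", pick_port(39998));  /* finds 40000 */
	printf("start 65530 -> %u\n", pick_port(65530));  /* wraps, gives up: 0 */
	return 0;
}
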
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index ea02f00d2dac..e848d8d6292f 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/timer.h> | 13 | #include <linux/timer.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/vmalloc.h> | ||
16 | #include <net/checksum.h> | 15 | #include <net/checksum.h> |
17 | #include <net/icmp.h> | 16 | #include <net/icmp.h> |
18 | #include <net/ip.h> | 17 | #include <net/ip.h> |
@@ -32,20 +31,15 @@ | |||
32 | #include <net/netfilter/nf_conntrack_l3proto.h> | 31 | #include <net/netfilter/nf_conntrack_l3proto.h> |
33 | #include <net/netfilter/nf_conntrack_l4proto.h> | 32 | #include <net/netfilter/nf_conntrack_l4proto.h> |
34 | 33 | ||
35 | #if 0 | ||
36 | #define DEBUGP printk | ||
37 | #else | ||
38 | #define DEBUGP(format, args...) | ||
39 | #endif | ||
40 | |||
41 | static DEFINE_RWLOCK(nf_nat_lock); | 34 | static DEFINE_RWLOCK(nf_nat_lock); |
42 | 35 | ||
43 | static struct nf_conntrack_l3proto *l3proto = NULL; | 36 | static struct nf_conntrack_l3proto *l3proto = NULL; |
44 | 37 | ||
45 | /* Calculated at init based on memory size */ | 38 | /* Calculated at init based on memory size */ |
46 | static unsigned int nf_nat_htable_size; | 39 | static unsigned int nf_nat_htable_size; |
40 | static int nf_nat_vmalloced; | ||
47 | 41 | ||
48 | static struct list_head *bysource; | 42 | static struct hlist_head *bysource; |
49 | 43 | ||
50 | #define MAX_IP_NAT_PROTO 256 | 44 | #define MAX_IP_NAT_PROTO 256 |
51 | static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]; | 45 | static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]; |
@@ -87,19 +81,6 @@ hash_by_src(const struct nf_conntrack_tuple *tuple) | |||
87 | tuple->dst.protonum, 0) % nf_nat_htable_size; | 81 | tuple->dst.protonum, 0) % nf_nat_htable_size; |
88 | } | 82 | } |
89 | 83 | ||
90 | /* Noone using conntrack by the time this called. */ | ||
91 | static void nf_nat_cleanup_conntrack(struct nf_conn *conn) | ||
92 | { | ||
93 | struct nf_conn_nat *nat; | ||
94 | if (!(conn->status & IPS_NAT_DONE_MASK)) | ||
95 | return; | ||
96 | |||
97 | nat = nfct_nat(conn); | ||
98 | write_lock_bh(&nf_nat_lock); | ||
99 | list_del(&nat->info.bysource); | ||
100 | write_unlock_bh(&nf_nat_lock); | ||
101 | } | ||
102 | |||
103 | /* Is this tuple already taken? (not by us) */ | 84 | /* Is this tuple already taken? (not by us) */ |
104 | int | 85 | int |
105 | nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, | 86 | nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, |
@@ -166,10 +147,11 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple, | |||
166 | unsigned int h = hash_by_src(tuple); | 147 | unsigned int h = hash_by_src(tuple); |
167 | struct nf_conn_nat *nat; | 148 | struct nf_conn_nat *nat; |
168 | struct nf_conn *ct; | 149 | struct nf_conn *ct; |
150 | struct hlist_node *n; | ||
169 | 151 | ||
170 | read_lock_bh(&nf_nat_lock); | 152 | read_lock_bh(&nf_nat_lock); |
171 | list_for_each_entry(nat, &bysource[h], info.bysource) { | 153 | hlist_for_each_entry(nat, n, &bysource[h], bysource) { |
172 | ct = (struct nf_conn *)((char *)nat - offsetof(struct nf_conn, data)); | 154 | ct = nat->ct; |
173 | if (same_src(ct, tuple)) { | 155 | if (same_src(ct, tuple)) { |
174 | /* Copy source part from reply tuple. */ | 156 | /* Copy source part from reply tuple. */ |
175 | nf_ct_invert_tuplepr(result, | 157 | nf_ct_invert_tuplepr(result, |
@@ -254,7 +236,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
254 | manips not an issue. */ | 236 | manips not an issue. */ |
255 | if (maniptype == IP_NAT_MANIP_SRC) { | 237 | if (maniptype == IP_NAT_MANIP_SRC) { |
256 | if (find_appropriate_src(orig_tuple, tuple, range)) { | 238 | if (find_appropriate_src(orig_tuple, tuple, range)) { |
257 | DEBUGP("get_unique_tuple: Found current src map\n"); | 239 | pr_debug("get_unique_tuple: Found current src map\n"); |
258 | if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) | 240 | if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) |
259 | if (!nf_nat_used_tuple(tuple, ct)) | 241 | if (!nf_nat_used_tuple(tuple, ct)) |
260 | return; | 242 | return; |
@@ -296,11 +278,20 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
296 | unsigned int hooknum) | 278 | unsigned int hooknum) |
297 | { | 279 | { |
298 | struct nf_conntrack_tuple curr_tuple, new_tuple; | 280 | struct nf_conntrack_tuple curr_tuple, new_tuple; |
299 | struct nf_conn_nat *nat = nfct_nat(ct); | 281 | struct nf_conn_nat *nat; |
300 | struct nf_nat_info *info = &nat->info; | ||
301 | int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK); | 282 | int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK); |
302 | enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); | 283 | enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); |
303 | 284 | ||
285 | /* nat helper or nfctnetlink also setup binding */ | ||
286 | nat = nfct_nat(ct); | ||
287 | if (!nat) { | ||
288 | nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); | ||
289 | if (nat == NULL) { | ||
290 | pr_debug("failed to add NAT extension\n"); | ||
291 | return NF_ACCEPT; | ||
292 | } | ||
293 | } | ||
294 | |||
304 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || | 295 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || |
305 | hooknum == NF_IP_POST_ROUTING || | 296 | hooknum == NF_IP_POST_ROUTING || |
306 | hooknum == NF_IP_LOCAL_IN || | 297 | hooknum == NF_IP_LOCAL_IN || |
@@ -337,7 +328,10 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
337 | 328 | ||
338 | srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 329 | srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
339 | write_lock_bh(&nf_nat_lock); | 330 | write_lock_bh(&nf_nat_lock); |
340 | list_add(&info->bysource, &bysource[srchash]); | 331 | /* nf_conntrack_alter_reply might re-allocate the extension area */ ||
332 | nat = nfct_nat(ct); | ||
333 | nat->ct = ct; | ||
334 | hlist_add_head(&nat->bysource, &bysource[srchash]); | ||
341 | write_unlock_bh(&nf_nat_lock); | 335 | write_unlock_bh(&nf_nat_lock); |
342 | } | 336 | } |
343 | 337 | ||
@@ -462,8 +456,9 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
462 | return 0; | 456 | return 0; |
463 | } | 457 | } |
464 | 458 | ||
465 | DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n", | 459 | pr_debug("icmp_reply_translation: translating error %p manip %u " |
466 | *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); | 460 | "dir %s\n", *pskb, manip, |
461 | dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); | ||
467 | 462 | ||
468 | /* rcu_read_lock()ed by nf_hook_slow */ | 463 | /* rcu_read_lock()ed by nf_hook_slow */ |
469 | l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); | 464 | l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); |
@@ -590,17 +585,69 @@ nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range) | |||
590 | EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr); | 585 | EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr); |
591 | #endif | 586 | #endif |
592 | 587 | ||
588 | /* No one is using the conntrack by the time this is called. */ ||
589 | static void nf_nat_cleanup_conntrack(struct nf_conn *ct) | ||
590 | { | ||
591 | struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT); | ||
592 | |||
593 | if (nat == NULL || nat->ct == NULL) | ||
594 | return; | ||
595 | |||
596 | NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK); | ||
597 | |||
598 | write_lock_bh(&nf_nat_lock); | ||
599 | hlist_del(&nat->bysource); | ||
600 | nat->ct = NULL; | ||
601 | write_unlock_bh(&nf_nat_lock); | ||
602 | } | ||
603 | |||
604 | static void nf_nat_move_storage(struct nf_conn *conntrack, void *old) | ||
605 | { | ||
606 | struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT); | ||
607 | struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old; | ||
608 | struct nf_conn *ct = old_nat->ct; | ||
609 | unsigned int srchash; | ||
610 | |||
611 | if (!(ct->status & IPS_NAT_DONE_MASK)) | ||
612 | return; | ||
613 | |||
614 | srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
615 | |||
616 | write_lock_bh(&nf_nat_lock); | ||
617 | hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); | ||
618 | new_nat->ct = ct; | ||
619 | write_unlock_bh(&nf_nat_lock); | ||
620 | } | ||
621 | |||
622 | static struct nf_ct_ext_type nat_extend __read_mostly = { | ||
623 | .len = sizeof(struct nf_conn_nat), | ||
624 | .align = __alignof__(struct nf_conn_nat), | ||
625 | .destroy = nf_nat_cleanup_conntrack, | ||
626 | .move = nf_nat_move_storage, | ||
627 | .id = NF_CT_EXT_NAT, | ||
628 | .flags = NF_CT_EXT_F_PREALLOC, | ||
629 | }; | ||
630 | |||
593 | static int __init nf_nat_init(void) | 631 | static int __init nf_nat_init(void) |
594 | { | 632 | { |
595 | size_t i; | 633 | size_t i; |
634 | int ret; | ||
635 | |||
636 | ret = nf_ct_extend_register(&nat_extend); | ||
637 | if (ret < 0) { | ||
638 | printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); | ||
639 | return ret; | ||
640 | } | ||
596 | 641 | ||
597 | /* Leave them the same for the moment. */ | 642 | /* Leave them the same for the moment. */ |
598 | nf_nat_htable_size = nf_conntrack_htable_size; | 643 | nf_nat_htable_size = nf_conntrack_htable_size; |
599 | 644 | ||
600 | /* One vmalloc for both hash tables */ | 645 | bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, |
601 | bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size); | 646 | &nf_nat_vmalloced); |
602 | if (!bysource) | 647 | if (!bysource) { |
603 | return -ENOMEM; | 648 | ret = -ENOMEM; |
649 | goto cleanup_extend; | ||
650 | } | ||
604 | 651 | ||
605 | /* Sew in builtin protocols. */ | 652 | /* Sew in builtin protocols. */ |
606 | write_lock_bh(&nf_nat_lock); | 653 | write_lock_bh(&nf_nat_lock); |
@@ -612,18 +659,18 @@ static int __init nf_nat_init(void) | |||
612 | write_unlock_bh(&nf_nat_lock); | 659 | write_unlock_bh(&nf_nat_lock); |
613 | 660 | ||
614 | for (i = 0; i < nf_nat_htable_size; i++) { | 661 | for (i = 0; i < nf_nat_htable_size; i++) { |
615 | INIT_LIST_HEAD(&bysource[i]); | 662 | INIT_HLIST_HEAD(&bysource[i]); |
616 | } | 663 | } |
617 | 664 | ||
618 | /* FIXME: Man, this is a hack. <SIGH> */ | ||
619 | NF_CT_ASSERT(rcu_dereference(nf_conntrack_destroyed) == NULL); | ||
620 | rcu_assign_pointer(nf_conntrack_destroyed, nf_nat_cleanup_conntrack); | ||
621 | |||
622 | /* Initialize fake conntrack so that NAT will skip it */ | 665 | /* Initialize fake conntrack so that NAT will skip it */ |
623 | nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; | 666 | nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; |
624 | 667 | ||
625 | l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); | 668 | l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); |
626 | return 0; | 669 | return 0; |
670 | |||
671 | cleanup_extend: | ||
672 | nf_ct_extend_unregister(&nat_extend); | ||
673 | return ret; | ||
627 | } | 674 | } |
628 | 675 | ||
629 | /* Clear NAT section of all conntracks, in case we're loaded again. */ | 676 | /* Clear NAT section of all conntracks, in case we're loaded again. */ |
@@ -641,10 +688,10 @@ static int clean_nat(struct nf_conn *i, void *data) | |||
641 | static void __exit nf_nat_cleanup(void) | 688 | static void __exit nf_nat_cleanup(void) |
642 | { | 689 | { |
643 | nf_ct_iterate_cleanup(&clean_nat, NULL); | 690 | nf_ct_iterate_cleanup(&clean_nat, NULL); |
644 | rcu_assign_pointer(nf_conntrack_destroyed, NULL); | ||
645 | synchronize_rcu(); | 691 | synchronize_rcu(); |
646 | vfree(bysource); | 692 | nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size); |
647 | nf_ct_l3proto_put(l3proto); | 693 | nf_ct_l3proto_put(l3proto); |
694 | nf_ct_extend_unregister(&nat_extend); | ||
648 | } | 695 | } |
649 | 696 | ||
650 | MODULE_LICENSE("GPL"); | 697 | MODULE_LICENSE("GPL"); |
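
The nf_nat_core.c changes above move per-connection NAT state into the new conntrack extension infrastructure: the data is attached with nf_ct_ext_add(), the extension type is registered with a destroy callback (replacing the old nf_conntrack_destroyed hook) and a move callback, and the bysource hash switches from list_head to hlist_head with an explicit nat->ct back-pointer instead of the old offsetof() arithmetic. The move callback exists because nf_conntrack_alter_reply may re-allocate the extension area, so the hlist node embedded in it must be re-linked in place. A self-contained userspace sketch of that re-linking; the hlist types are trimmed copies of the kernel's, hlist_replace() drops the RCU barriers of hlist_replace_rcu(), and fake_nat/ct_id are invented stand-ins for struct nf_conn_nat and its conntrack pointer:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Trimmed-down copies of the kernel's hlist types: a bucket head is one
 * pointer (vs. two in list_head), which is why the bysource table shrinks
 * by half when it switches to hlist_head. */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* hlist_replace_rcu() without the RCU barriers: splice a new node into the
 * position of an old one, as nf_nat_move_storage() does when the extension
 * area is re-allocated and the embedded node changes address. */
static void hlist_replace(struct hlist_node *old, struct hlist_node *new)
{
	new->next = old->next;
	new->pprev = old->pprev;
	*new->pprev = new;
	if (new->next)
		new->next->pprev = new;
}

struct fake_nat {		/* stand-in for struct nf_conn_nat */
	int ct_id;		/* stand-in for the nat->ct back-pointer */
	struct hlist_node bysource;
};

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct fake_nat *nat = calloc(1, sizeof(*nat));
	struct fake_nat *moved = calloc(1, sizeof(*moved));
	struct fake_nat *found;

	nat->ct_id = 42;
	hlist_add_head(&nat->bysource, &bucket);

	/* "Extension re-allocation": the object moves, so its node is
	 * re-linked in place instead of being unhashed and re-hashed. */
	memcpy(moved, nat, sizeof(*moved));
	hlist_replace(&nat->bysource, &moved->bysource);
	free(nat);

	found = (struct fake_nat *)
		((char *)bucket.first - offsetof(struct fake_nat, bysource));
	printf("bucket still finds ct %d after the move\n", found->ct_id);
	free(moved);
	return 0;
}
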
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c index e6bc8e5a72f1..3663bd879c39 100644 --- a/net/ipv4/netfilter/nf_nat_ftp.c +++ b/net/ipv4/netfilter/nf_nat_ftp.c | |||
@@ -25,12 +25,6 @@ MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); | |||
25 | MODULE_DESCRIPTION("ftp NAT helper"); | 25 | MODULE_DESCRIPTION("ftp NAT helper"); |
26 | MODULE_ALIAS("ip_nat_ftp"); | 26 | MODULE_ALIAS("ip_nat_ftp"); |
27 | 27 | ||
28 | #if 0 | ||
29 | #define DEBUGP printk | ||
30 | #else | ||
31 | #define DEBUGP(format, args...) | ||
32 | #endif | ||
33 | |||
34 | /* FIXME: Time out? --RR */ | 28 | /* FIXME: Time out? --RR */ |
35 | 29 | ||
36 | static int | 30 | static int |
@@ -47,7 +41,7 @@ mangle_rfc959_packet(struct sk_buff **pskb, | |||
47 | sprintf(buffer, "%u,%u,%u,%u,%u,%u", | 41 | sprintf(buffer, "%u,%u,%u,%u,%u,%u", |
48 | NIPQUAD(newip), port>>8, port&0xFF); | 42 | NIPQUAD(newip), port>>8, port&0xFF); |
49 | 43 | ||
50 | DEBUGP("calling nf_nat_mangle_tcp_packet\n"); | 44 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); |
51 | 45 | ||
52 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 46 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, |
53 | matchlen, buffer, strlen(buffer)); | 47 | matchlen, buffer, strlen(buffer)); |
@@ -67,7 +61,7 @@ mangle_eprt_packet(struct sk_buff **pskb, | |||
67 | 61 | ||
68 | sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port); | 62 | sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port); |
69 | 63 | ||
70 | DEBUGP("calling nf_nat_mangle_tcp_packet\n"); | 64 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); |
71 | 65 | ||
72 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 66 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, |
73 | matchlen, buffer, strlen(buffer)); | 67 | matchlen, buffer, strlen(buffer)); |
@@ -87,7 +81,7 @@ mangle_epsv_packet(struct sk_buff **pskb, | |||
87 | 81 | ||
88 | sprintf(buffer, "|||%u|", port); | 82 | sprintf(buffer, "|||%u|", port); |
89 | 83 | ||
90 | DEBUGP("calling nf_nat_mangle_tcp_packet\n"); | 84 | pr_debug("calling nf_nat_mangle_tcp_packet\n"); |
91 | 85 | ||
92 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 86 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, |
93 | matchlen, buffer, strlen(buffer)); | 87 | matchlen, buffer, strlen(buffer)); |
@@ -117,7 +111,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb, | |||
117 | int dir = CTINFO2DIR(ctinfo); | 111 | int dir = CTINFO2DIR(ctinfo); |
118 | struct nf_conn *ct = exp->master; | 112 | struct nf_conn *ct = exp->master; |
119 | 113 | ||
120 | DEBUGP("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); | 114 | pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); |
121 | 115 | ||
122 | /* Connection will come from wherever this packet goes, hence !dir */ | 116 | /* Connection will come from wherever this packet goes, hence !dir */ |
123 | newip = ct->tuplehash[!dir].tuple.dst.u3.ip; | 117 | newip = ct->tuplehash[!dir].tuple.dst.u3.ip; |
@@ -131,7 +125,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb, | |||
131 | /* Try to get same port: if not, try to change it. */ | 125 | /* Try to get same port: if not, try to change it. */ |
132 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { | 126 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { |
133 | exp->tuple.dst.u.tcp.port = htons(port); | 127 | exp->tuple.dst.u.tcp.port = htons(port); |
134 | if (nf_conntrack_expect_related(exp) == 0) | 128 | if (nf_ct_expect_related(exp) == 0) |
135 | break; | 129 | break; |
136 | } | 130 | } |
137 | 131 | ||
@@ -139,7 +133,7 @@ static unsigned int nf_nat_ftp(struct sk_buff **pskb, | |||
139 | return NF_DROP; | 133 | return NF_DROP; |
140 | 134 | ||
141 | if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo)) { | 135 | if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo)) { |
142 | nf_conntrack_unexpect_related(exp); | 136 | nf_ct_unexpect_related(exp); |
143 | return NF_DROP; | 137 | return NF_DROP; |
144 | } | 138 | } |
145 | return NF_ACCEPT; | 139 | return NF_ACCEPT; |
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index c5d2a2d690b8..c1b059a73708 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
@@ -21,12 +21,6 @@ | |||
21 | #include <net/netfilter/nf_conntrack_expect.h> | 21 | #include <net/netfilter/nf_conntrack_expect.h> |
22 | #include <linux/netfilter/nf_conntrack_h323.h> | 22 | #include <linux/netfilter/nf_conntrack_h323.h> |
23 | 23 | ||
24 | #if 0 | ||
25 | #define DEBUGP printk | ||
26 | #else | ||
27 | #define DEBUGP(format, args...) | ||
28 | #endif | ||
29 | |||
30 | /****************************************************************************/ | 24 | /****************************************************************************/ |
31 | static int set_addr(struct sk_buff **pskb, | 25 | static int set_addr(struct sk_buff **pskb, |
32 | unsigned char **data, int dataoff, | 26 | unsigned char **data, int dataoff, |
@@ -126,12 +120,11 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
126 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) | 120 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) |
127 | i = 0; | 121 | i = 0; |
128 | 122 | ||
129 | DEBUGP | 123 | pr_debug("nf_nat_ras: set signal address " |
130 | ("nf_nat_ras: set signal address " | 124 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
131 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 125 | NIPQUAD(addr.ip), port, |
132 | NIPQUAD(ip), port, | 126 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), |
133 | NIPQUAD(ct->tuplehash[!dir].tuple.dst. | 127 | info->sig_port[!dir]); |
134 | ip), info->sig_port[!dir]); | ||
135 | return set_h225_addr(pskb, data, 0, &taddr[i], | 128 | return set_h225_addr(pskb, data, 0, &taddr[i], |
136 | &ct->tuplehash[!dir]. | 129 | &ct->tuplehash[!dir]. |
137 | tuple.dst.u3, | 130 | tuple.dst.u3, |
@@ -139,12 +132,11 @@ static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
139 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && | 132 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && |
140 | port == info->sig_port[dir]) { | 133 | port == info->sig_port[dir]) { |
141 | /* GK->GW */ | 134 | /* GK->GW */ |
142 | DEBUGP | 135 | pr_debug("nf_nat_ras: set signal address " |
143 | ("nf_nat_ras: set signal address " | 136 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
144 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 137 | NIPQUAD(addr.ip), port, |
145 | NIPQUAD(ip), port, | 138 | NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip), |
146 | NIPQUAD(ct->tuplehash[!dir].tuple.src. | 139 | info->sig_port[!dir]); |
147 | ip), info->sig_port[!dir]); | ||
148 | return set_h225_addr(pskb, data, 0, &taddr[i], | 140 | return set_h225_addr(pskb, data, 0, &taddr[i], |
149 | &ct->tuplehash[!dir]. | 141 | &ct->tuplehash[!dir]. |
150 | tuple.src.u3, | 142 | tuple.src.u3, |
@@ -171,12 +163,11 @@ static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct, | |||
171 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && | 163 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && |
172 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | 164 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && |
173 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { | 165 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { |
174 | DEBUGP("nf_nat_ras: set rasAddress " | 166 | pr_debug("nf_nat_ras: set rasAddress " |
175 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 167 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
176 | NIPQUAD(ip), ntohs(port), | 168 | NIPQUAD(addr.ip), ntohs(port), |
177 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), | 169 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), |
178 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp. | 170 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port)); |
179 | port)); | ||
180 | return set_h225_addr(pskb, data, 0, &taddr[i], | 171 | return set_h225_addr(pskb, data, 0, &taddr[i], |
181 | &ct->tuplehash[!dir].tuple.dst.u3, | 172 | &ct->tuplehash[!dir].tuple.dst.u3, |
182 | ct->tuplehash[!dir].tuple. | 173 | ct->tuplehash[!dir].tuple. |
@@ -237,12 +228,12 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | |||
237 | for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); | 228 | for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); |
238 | nated_port != 0; nated_port += 2) { | 229 | nated_port != 0; nated_port += 2) { |
239 | rtp_exp->tuple.dst.u.udp.port = htons(nated_port); | 230 | rtp_exp->tuple.dst.u.udp.port = htons(nated_port); |
240 | if (nf_conntrack_expect_related(rtp_exp) == 0) { | 231 | if (nf_ct_expect_related(rtp_exp) == 0) { |
241 | rtcp_exp->tuple.dst.u.udp.port = | 232 | rtcp_exp->tuple.dst.u.udp.port = |
242 | htons(nated_port + 1); | 233 | htons(nated_port + 1); |
243 | if (nf_conntrack_expect_related(rtcp_exp) == 0) | 234 | if (nf_ct_expect_related(rtcp_exp) == 0) |
244 | break; | 235 | break; |
245 | nf_conntrack_unexpect_related(rtp_exp); | 236 | nf_ct_unexpect_related(rtp_exp); |
246 | } | 237 | } |
247 | } | 238 | } |
248 | 239 | ||
@@ -261,22 +252,22 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | |||
261 | info->rtp_port[i][dir] = rtp_port; | 252 | info->rtp_port[i][dir] = rtp_port; |
262 | info->rtp_port[i][!dir] = htons(nated_port); | 253 | info->rtp_port[i][!dir] = htons(nated_port); |
263 | } else { | 254 | } else { |
264 | nf_conntrack_unexpect_related(rtp_exp); | 255 | nf_ct_unexpect_related(rtp_exp); |
265 | nf_conntrack_unexpect_related(rtcp_exp); | 256 | nf_ct_unexpect_related(rtcp_exp); |
266 | return -1; | 257 | return -1; |
267 | } | 258 | } |
268 | 259 | ||
269 | /* Success */ | 260 | /* Success */ |
270 | DEBUGP("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 261 | pr_debug("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
271 | NIPQUAD(rtp_exp->tuple.src.ip), | 262 | NIPQUAD(rtp_exp->tuple.src.u3.ip), |
272 | ntohs(rtp_exp->tuple.src.u.udp.port), | 263 | ntohs(rtp_exp->tuple.src.u.udp.port), |
273 | NIPQUAD(rtp_exp->tuple.dst.ip), | 264 | NIPQUAD(rtp_exp->tuple.dst.u3.ip), |
274 | ntohs(rtp_exp->tuple.dst.u.udp.port)); | 265 | ntohs(rtp_exp->tuple.dst.u.udp.port)); |
275 | DEBUGP("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 266 | pr_debug("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
276 | NIPQUAD(rtcp_exp->tuple.src.ip), | 267 | NIPQUAD(rtcp_exp->tuple.src.u3.ip), |
277 | ntohs(rtcp_exp->tuple.src.u.udp.port), | 268 | ntohs(rtcp_exp->tuple.src.u.udp.port), |
278 | NIPQUAD(rtcp_exp->tuple.dst.ip), | 269 | NIPQUAD(rtcp_exp->tuple.dst.u3.ip), |
279 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); | 270 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); |
280 | 271 | ||
281 | return 0; | 272 | return 0; |
282 | } | 273 | } |
@@ -299,7 +290,7 @@ static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct, | |||
299 | /* Try to get same port: if not, try to change it. */ | 290 | /* Try to get same port: if not, try to change it. */ |
300 | for (; nated_port != 0; nated_port++) { | 291 | for (; nated_port != 0; nated_port++) { |
301 | exp->tuple.dst.u.tcp.port = htons(nated_port); | 292 | exp->tuple.dst.u.tcp.port = htons(nated_port); |
302 | if (nf_conntrack_expect_related(exp) == 0) | 293 | if (nf_ct_expect_related(exp) == 0) |
303 | break; | 294 | break; |
304 | } | 295 | } |
305 | 296 | ||
@@ -313,13 +304,15 @@ static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct, | |||
313 | if (set_h245_addr(pskb, data, dataoff, taddr, | 304 | if (set_h245_addr(pskb, data, dataoff, taddr, |
314 | &ct->tuplehash[!dir].tuple.dst.u3, | 305 | &ct->tuplehash[!dir].tuple.dst.u3, |
315 | htons(nated_port)) < 0) { | 306 | htons(nated_port)) < 0) { |
316 | nf_conntrack_unexpect_related(exp); | 307 | nf_ct_unexpect_related(exp); |
317 | return -1; | 308 | return -1; |
318 | } | 309 | } |
319 | 310 | ||
320 | DEBUGP("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 311 | pr_debug("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
321 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | 312 | NIPQUAD(exp->tuple.src.u3.ip), |
322 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | 313 | ntohs(exp->tuple.src.u.tcp.port), |
314 | NIPQUAD(exp->tuple.dst.u3.ip), | ||
315 | ntohs(exp->tuple.dst.u.tcp.port)); | ||
323 | 316 | ||
324 | return 0; | 317 | return 0; |
325 | } | 318 | } |
@@ -347,7 +340,7 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct, | |||
347 | /* Try to get same port: if not, try to change it. */ | 340 | /* Try to get same port: if not, try to change it. */ |
348 | for (; nated_port != 0; nated_port++) { | 341 | for (; nated_port != 0; nated_port++) { |
349 | exp->tuple.dst.u.tcp.port = htons(nated_port); | 342 | exp->tuple.dst.u.tcp.port = htons(nated_port); |
350 | if (nf_conntrack_expect_related(exp) == 0) | 343 | if (nf_ct_expect_related(exp) == 0) |
351 | break; | 344 | break; |
352 | } | 345 | } |
353 | 346 | ||
@@ -365,13 +358,15 @@ static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct, | |||
365 | info->sig_port[dir] = port; | 358 | info->sig_port[dir] = port; |
366 | info->sig_port[!dir] = htons(nated_port); | 359 | info->sig_port[!dir] = htons(nated_port); |
367 | } else { | 360 | } else { |
368 | nf_conntrack_unexpect_related(exp); | 361 | nf_ct_unexpect_related(exp); |
369 | return -1; | 362 | return -1; |
370 | } | 363 | } |
371 | 364 | ||
372 | DEBUGP("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 365 | pr_debug("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
373 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | 366 | NIPQUAD(exp->tuple.src.u3.ip), |
374 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | 367 | ntohs(exp->tuple.src.u.tcp.port), |
368 | NIPQUAD(exp->tuple.dst.u3.ip), | ||
369 | ntohs(exp->tuple.dst.u.tcp.port)); | ||
375 | 370 | ||
376 | return 0; | 371 | return 0; |
377 | } | 372 | } |
@@ -433,7 +428,7 @@ static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct, | |||
433 | /* Try to get same port: if not, try to change it. */ | 428 | /* Try to get same port: if not, try to change it. */ |
434 | for (; nated_port != 0; nated_port++) { | 429 | for (; nated_port != 0; nated_port++) { |
435 | exp->tuple.dst.u.tcp.port = htons(nated_port); | 430 | exp->tuple.dst.u.tcp.port = htons(nated_port); |
436 | if (nf_conntrack_expect_related(exp) == 0) | 431 | if (nf_ct_expect_related(exp) == 0) |
437 | break; | 432 | break; |
438 | } | 433 | } |
439 | 434 | ||
@@ -460,14 +455,16 @@ static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct, | |||
460 | info->sig_port[!dir]); | 455 | info->sig_port[!dir]); |
461 | } | 456 | } |
462 | } else { | 457 | } else { |
463 | nf_conntrack_unexpect_related(exp); | 458 | nf_ct_unexpect_related(exp); |
464 | return -1; | 459 | return -1; |
465 | } | 460 | } |
466 | 461 | ||
467 | /* Success */ | 462 | /* Success */ |
468 | DEBUGP("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 463 | pr_debug("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
469 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | 464 | NIPQUAD(exp->tuple.src.u3.ip), |
470 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | 465 | ntohs(exp->tuple.src.u.tcp.port), |
466 | NIPQUAD(exp->tuple.dst.u3.ip), | ||
467 | ntohs(exp->tuple.dst.u.tcp.port)); | ||
471 | 468 | ||
472 | return 0; | 469 | return 0; |
473 | } | 470 | } |
@@ -517,7 +514,7 @@ static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct, | |||
517 | /* Try to get same port: if not, try to change it. */ | 514 | /* Try to get same port: if not, try to change it. */ |
518 | for (nated_port = ntohs(port); nated_port != 0; nated_port++) { | 515 | for (nated_port = ntohs(port); nated_port != 0; nated_port++) { |
519 | exp->tuple.dst.u.tcp.port = htons(nated_port); | 516 | exp->tuple.dst.u.tcp.port = htons(nated_port); |
520 | if (nf_conntrack_expect_related(exp) == 0) | 517 | if (nf_ct_expect_related(exp) == 0) |
521 | break; | 518 | break; |
522 | } | 519 | } |
523 | 520 | ||
@@ -531,15 +528,17 @@ static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct, | |||
531 | if (!set_h225_addr(pskb, data, dataoff, taddr, | 528 | if (!set_h225_addr(pskb, data, dataoff, taddr, |
532 | &ct->tuplehash[!dir].tuple.dst.u3, | 529 | &ct->tuplehash[!dir].tuple.dst.u3, |
533 | htons(nated_port)) == 0) { | 530 | htons(nated_port)) == 0) { |
534 | nf_conntrack_unexpect_related(exp); | 531 | nf_ct_unexpect_related(exp); |
535 | return -1; | 532 | return -1; |
536 | } | 533 | } |
537 | 534 | ||
538 | /* Success */ | 535 | /* Success */ |
539 | DEBUGP("nf_nat_q931: expect Call Forwarding " | 536 | pr_debug("nf_nat_q931: expect Call Forwarding " |
540 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | 537 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", |
541 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | 538 | NIPQUAD(exp->tuple.src.u3.ip), |
542 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | 539 | ntohs(exp->tuple.src.u.tcp.port), |
540 | NIPQUAD(exp->tuple.dst.u3.ip), | ||
541 | ntohs(exp->tuple.dst.u.tcp.port)); | ||
543 | 542 | ||
544 | return 0; | 543 | return 0; |
545 | } | 544 | } |
@@ -566,8 +565,6 @@ static int __init init(void) | |||
566 | rcu_assign_pointer(nat_h245_hook, nat_h245); | 565 | rcu_assign_pointer(nat_h245_hook, nat_h245); |
567 | rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); | 566 | rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); |
568 | rcu_assign_pointer(nat_q931_hook, nat_q931); | 567 | rcu_assign_pointer(nat_q931_hook, nat_q931); |
569 | |||
570 | DEBUGP("nf_nat_h323: init success\n"); | ||
571 | return 0; | 568 | return 0; |
572 | } | 569 | } |
573 | 570 | ||
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 15b6e5ce3a04..93d8a0a8f035 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -26,13 +26,9 @@ | |||
26 | #include <net/netfilter/nf_nat_core.h> | 26 | #include <net/netfilter/nf_nat_core.h> |
27 | #include <net/netfilter/nf_nat_helper.h> | 27 | #include <net/netfilter/nf_nat_helper.h> |
28 | 28 | ||
29 | #if 0 | 29 | #define DUMP_OFFSET(x) \ |
30 | #define DEBUGP printk | 30 | pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \ |
31 | #define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos); | 31 | x->offset_before, x->offset_after, x->correction_pos); |
32 | #else | ||
33 | #define DEBUGP(format, args...) | ||
34 | #define DUMP_OFFSET(x) | ||
35 | #endif | ||
36 | 32 | ||
37 | static DEFINE_SPINLOCK(nf_nat_seqofs_lock); | 33 | static DEFINE_SPINLOCK(nf_nat_seqofs_lock); |
38 | 34 | ||
@@ -47,15 +43,15 @@ adjust_tcp_sequence(u32 seq, | |||
47 | struct nf_nat_seq *this_way, *other_way; | 43 | struct nf_nat_seq *this_way, *other_way; |
48 | struct nf_conn_nat *nat = nfct_nat(ct); | 44 | struct nf_conn_nat *nat = nfct_nat(ct); |
49 | 45 | ||
50 | DEBUGP("nf_nat_resize_packet: old_size = %u, new_size = %u\n", | 46 | pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n", |
51 | (*skb)->len, new_size); | 47 | ntohl(seq), seq); |
52 | 48 | ||
53 | dir = CTINFO2DIR(ctinfo); | 49 | dir = CTINFO2DIR(ctinfo); |
54 | 50 | ||
55 | this_way = &nat->info.seq[dir]; | 51 | this_way = &nat->seq[dir]; |
56 | other_way = &nat->info.seq[!dir]; | 52 | other_way = &nat->seq[!dir]; |
57 | 53 | ||
58 | DEBUGP("nf_nat_resize_packet: Seq_offset before: "); | 54 | pr_debug("nf_nat_resize_packet: Seq_offset before: "); |
59 | DUMP_OFFSET(this_way); | 55 | DUMP_OFFSET(this_way); |
60 | 56 | ||
61 | spin_lock_bh(&nf_nat_seqofs_lock); | 57 | spin_lock_bh(&nf_nat_seqofs_lock); |
@@ -72,7 +68,7 @@ adjust_tcp_sequence(u32 seq, | |||
72 | } | 68 | } |
73 | spin_unlock_bh(&nf_nat_seqofs_lock); | 69 | spin_unlock_bh(&nf_nat_seqofs_lock); |
74 | 70 | ||
75 | DEBUGP("nf_nat_resize_packet: Seq_offset after: "); | 71 | pr_debug("nf_nat_resize_packet: Seq_offset after: "); |
76 | DUMP_OFFSET(this_way); | 72 | DUMP_OFFSET(this_way); |
77 | } | 73 | } |
78 | 74 | ||
@@ -100,14 +96,12 @@ static void mangle_contents(struct sk_buff *skb, | |||
100 | 96 | ||
101 | /* update skb info */ | 97 | /* update skb info */ |
102 | if (rep_len > match_len) { | 98 | if (rep_len > match_len) { |
103 | DEBUGP("nf_nat_mangle_packet: Extending packet by " | 99 | pr_debug("nf_nat_mangle_packet: Extending packet by " |
104 | "%u from %u bytes\n", rep_len - match_len, | 100 | "%u from %u bytes\n", rep_len - match_len, skb->len); |
105 | skb->len); | ||
106 | skb_put(skb, rep_len - match_len); | 101 | skb_put(skb, rep_len - match_len); |
107 | } else { | 102 | } else { |
108 | DEBUGP("nf_nat_mangle_packet: Shrinking packet from " | 103 | pr_debug("nf_nat_mangle_packet: Shrinking packet from " |
109 | "%u from %u bytes\n", match_len - rep_len, | 104 | "%u from %u bytes\n", match_len - rep_len, skb->len); |
110 | skb->len); | ||
111 | __skb_trim(skb, skb->len + rep_len - match_len); | 105 | __skb_trim(skb, skb->len + rep_len - match_len); |
112 | } | 106 | } |
113 | 107 | ||
@@ -178,7 +172,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
178 | datalen = (*pskb)->len - iph->ihl*4; | 172 | datalen = (*pskb)->len - iph->ihl*4; |
179 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 173 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { |
180 | if (!(rt->rt_flags & RTCF_LOCAL) && | 174 | if (!(rt->rt_flags & RTCF_LOCAL) && |
181 | (*pskb)->dev->features & NETIF_F_ALL_CSUM) { | 175 | (*pskb)->dev->features & NETIF_F_V4_CSUM) { |
182 | (*pskb)->ip_summed = CHECKSUM_PARTIAL; | 176 | (*pskb)->ip_summed = CHECKSUM_PARTIAL; |
183 | (*pskb)->csum_start = skb_headroom(*pskb) + | 177 | (*pskb)->csum_start = skb_headroom(*pskb) + |
184 | skb_network_offset(*pskb) + | 178 | skb_network_offset(*pskb) + |
@@ -190,7 +184,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
190 | tcph->check = 0; | 184 | tcph->check = 0; |
191 | tcph->check = tcp_v4_check(datalen, | 185 | tcph->check = tcp_v4_check(datalen, |
192 | iph->saddr, iph->daddr, | 186 | iph->saddr, iph->daddr, |
193 | csum_partial((char *)tcph, | 187 | csum_partial(tcph, |
194 | datalen, 0)); | 188 | datalen, 0)); |
195 | } | 189 | } |
196 | } else | 190 | } else |
@@ -265,7 +259,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
265 | 259 | ||
266 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 260 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { |
267 | if (!(rt->rt_flags & RTCF_LOCAL) && | 261 | if (!(rt->rt_flags & RTCF_LOCAL) && |
268 | (*pskb)->dev->features & NETIF_F_ALL_CSUM) { | 262 | (*pskb)->dev->features & NETIF_F_V4_CSUM) { |
269 | (*pskb)->ip_summed = CHECKSUM_PARTIAL; | 263 | (*pskb)->ip_summed = CHECKSUM_PARTIAL; |
270 | (*pskb)->csum_start = skb_headroom(*pskb) + | 264 | (*pskb)->csum_start = skb_headroom(*pskb) + |
271 | skb_network_offset(*pskb) + | 265 | skb_network_offset(*pskb) + |
@@ -278,7 +272,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
278 | udph->check = 0; | 272 | udph->check = 0; |
279 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, | 273 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, |
280 | datalen, IPPROTO_UDP, | 274 | datalen, IPPROTO_UDP, |
281 | csum_partial((char *)udph, | 275 | csum_partial(udph, |
282 | datalen, 0)); | 276 | datalen, 0)); |
283 | if (!udph->check) | 277 | if (!udph->check) |
284 | udph->check = CSUM_MANGLED_0; | 278 | udph->check = CSUM_MANGLED_0; |
@@ -320,9 +314,9 @@ sack_adjust(struct sk_buff *skb, | |||
320 | new_end_seq = htonl(ntohl(sack->end_seq) | 314 | new_end_seq = htonl(ntohl(sack->end_seq) |
321 | - natseq->offset_before); | 315 | - natseq->offset_before); |
322 | 316 | ||
323 | DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", | 317 | pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", |
324 | ntohl(sack->start_seq), new_start_seq, | 318 | ntohl(sack->start_seq), new_start_seq, |
325 | ntohl(sack->end_seq), new_end_seq); | 319 | ntohl(sack->end_seq), new_end_seq); |
326 | 320 | ||
327 | nf_proto_csum_replace4(&tcph->check, skb, | 321 | nf_proto_csum_replace4(&tcph->check, skb, |
328 | sack->start_seq, new_start_seq, 0); | 322 | sack->start_seq, new_start_seq, 0); |
@@ -372,8 +366,7 @@ nf_nat_sack_adjust(struct sk_buff **pskb, | |||
372 | op[1] >= 2+TCPOLEN_SACK_PERBLOCK && | 366 | op[1] >= 2+TCPOLEN_SACK_PERBLOCK && |
373 | ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) | 367 | ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) |
374 | sack_adjust(*pskb, tcph, optoff+2, | 368 | sack_adjust(*pskb, tcph, optoff+2, |
375 | optoff+op[1], | 369 | optoff+op[1], &nat->seq[!dir]); |
376 | &nat->info.seq[!dir]); | ||
377 | optoff += op[1]; | 370 | optoff += op[1]; |
378 | } | 371 | } |
379 | } | 372 | } |
@@ -394,8 +387,8 @@ nf_nat_seq_adjust(struct sk_buff **pskb, | |||
394 | 387 | ||
395 | dir = CTINFO2DIR(ctinfo); | 388 | dir = CTINFO2DIR(ctinfo); |
396 | 389 | ||
397 | this_way = &nat->info.seq[dir]; | 390 | this_way = &nat->seq[dir]; |
398 | other_way = &nat->info.seq[!dir]; | 391 | other_way = &nat->seq[!dir]; |
399 | 392 | ||
400 | if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) | 393 | if (!skb_make_writable(pskb, ip_hdrlen(*pskb) + sizeof(*tcph))) |
401 | return 0; | 394 | return 0; |
@@ -415,9 +408,9 @@ nf_nat_seq_adjust(struct sk_buff **pskb, | |||
415 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); | 408 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); |
416 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); | 409 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); |
417 | 410 | ||
418 | DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n", | 411 | pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n", |
419 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), | 412 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), |
420 | ntohl(newack)); | 413 | ntohl(newack)); |
421 | 414 | ||
422 | tcph->seq = newseq; | 415 | tcph->seq = newseq; |
423 | tcph->ack_seq = newack; | 416 | tcph->ack_seq = newack; |
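
The nf_nat_helper.c hunks above rename the sequence bookkeeping from nat->info.seq[] to nat->seq[], where each direction carries correction_pos, offset_before and offset_after (the fields DUMP_OFFSET prints). The selection rule itself lies outside the lines shown here, so the following is an assumption-laden userspace sketch of the usual scheme: sequence numbers past the point where a mangled packet changed size get the new offset, earlier ones keep the old one, which is how a rewritten payload (e.g. a longer FTP PORT line) stays invisible to both TCP endpoints. adjust_seq() and the sample numbers are illustrative only:

#include <stdint.h>
#include <stdio.h>

struct nat_seq {
	uint32_t correction_pos;	/* seq where the resize happened */
	int32_t offset_before;		/* cumulative offset before that point */
	int32_t offset_after;		/* cumulative offset after it */
};

/* Simplified: a real implementation compares with after(), which copes
 * with 32-bit sequence-number wraparound. */
static uint32_t adjust_seq(uint32_t seq, const struct nat_seq *s)
{
	return seq + (seq > s->correction_pos ? s->offset_after
					      : s->offset_before);
}

int main(void)
{
	/* A mangled packet at seq 1000 grew by 3 bytes. */
	struct nat_seq s = { .correction_pos = 1000,
			     .offset_before = 0,
			     .offset_after = 3 };

	printf("seq  900 -> %u\n", adjust_seq(900, &s));	/* unchanged */
	printf("seq 1500 -> %u\n", adjust_seq(1500, &s));	/* shifted by 3 */
	return 0;
}
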
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c index 9b8c0daea744..bcf274bba602 100644 --- a/net/ipv4/netfilter/nf_nat_irc.c +++ b/net/ipv4/netfilter/nf_nat_irc.c | |||
@@ -22,12 +22,6 @@ | |||
22 | #include <net/netfilter/nf_conntrack_expect.h> | 22 | #include <net/netfilter/nf_conntrack_expect.h> |
23 | #include <linux/netfilter/nf_conntrack_irc.h> | 23 | #include <linux/netfilter/nf_conntrack_irc.h> |
24 | 24 | ||
25 | #if 0 | ||
26 | #define DEBUGP printk | ||
27 | #else | ||
28 | #define DEBUGP(format, args...) | ||
29 | #endif | ||
30 | |||
31 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | 25 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); |
32 | MODULE_DESCRIPTION("IRC (DCC) NAT helper"); | 26 | MODULE_DESCRIPTION("IRC (DCC) NAT helper"); |
33 | MODULE_LICENSE("GPL"); | 27 | MODULE_LICENSE("GPL"); |
@@ -44,9 +38,6 @@ static unsigned int help(struct sk_buff **pskb, | |||
44 | u_int16_t port; | 38 | u_int16_t port; |
45 | unsigned int ret; | 39 | unsigned int ret; |
46 | 40 | ||
47 | DEBUGP("IRC_NAT: info (seq %u + %u) in %u\n", | ||
48 | expect->seq, exp_irc_info->len, ntohl(tcph->seq)); | ||
49 | |||
50 | /* Reply comes from server. */ | 41 | /* Reply comes from server. */ |
51 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | 42 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; |
52 | exp->dir = IP_CT_DIR_REPLY; | 43 | exp->dir = IP_CT_DIR_REPLY; |
@@ -55,7 +46,7 @@ static unsigned int help(struct sk_buff **pskb, | |||
55 | /* Try to get same port: if not, try to change it. */ | 46 | /* Try to get same port: if not, try to change it. */ |
56 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { | 47 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { |
57 | exp->tuple.dst.u.tcp.port = htons(port); | 48 | exp->tuple.dst.u.tcp.port = htons(port); |
58 | if (nf_conntrack_expect_related(exp) == 0) | 49 | if (nf_ct_expect_related(exp) == 0) |
59 | break; | 50 | break; |
60 | } | 51 | } |
61 | 52 | ||
@@ -64,14 +55,14 @@ static unsigned int help(struct sk_buff **pskb, | |||
64 | 55 | ||
65 | ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); | 56 | ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); |
66 | sprintf(buffer, "%u %u", ip, port); | 57 | sprintf(buffer, "%u %u", ip, port); |
67 | DEBUGP("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", | 58 | pr_debug("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", |
68 | buffer, NIPQUAD(ip), port); | 59 | buffer, NIPQUAD(ip), port); |
69 | 60 | ||
70 | ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, | 61 | ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, |
71 | matchoff, matchlen, buffer, | 62 | matchoff, matchlen, buffer, |
72 | strlen(buffer)); | 63 | strlen(buffer)); |
73 | if (ret != NF_ACCEPT) | 64 | if (ret != NF_ACCEPT) |
74 | nf_conntrack_unexpect_related(exp); | 65 | nf_ct_unexpect_related(exp); |
75 | return ret; | 66 | return ret; |
76 | } | 67 | } |
77 | 68 | ||
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index a66888749ceb..984ec8308b2e 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c | |||
@@ -37,14 +37,6 @@ MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | |||
37 | MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); | 37 | MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); |
38 | MODULE_ALIAS("ip_nat_pptp"); | 38 | MODULE_ALIAS("ip_nat_pptp"); |
39 | 39 | ||
40 | #if 0 | ||
41 | extern const char *pptp_msg_name[]; | ||
42 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ | ||
43 | __FUNCTION__, ## args) | ||
44 | #else | ||
45 | #define DEBUGP(format, args...) | ||
46 | #endif | ||
47 | |||
48 | static void pptp_nat_expected(struct nf_conn *ct, | 40 | static void pptp_nat_expected(struct nf_conn *ct, |
49 | struct nf_conntrack_expect *exp) | 41 | struct nf_conntrack_expect *exp) |
50 | { | 42 | { |
@@ -60,7 +52,7 @@ static void pptp_nat_expected(struct nf_conn *ct, | |||
60 | 52 | ||
61 | /* And here goes the grand finale of corrosion... */ | 53 | /* And here goes the grand finale of corrosion... */ |
62 | if (exp->dir == IP_CT_DIR_ORIGINAL) { | 54 | if (exp->dir == IP_CT_DIR_ORIGINAL) { |
63 | DEBUGP("we are PNS->PAC\n"); | 55 | pr_debug("we are PNS->PAC\n"); |
64 | /* therefore, build tuple for PAC->PNS */ | 56 | /* therefore, build tuple for PAC->PNS */ |
65 | t.src.l3num = AF_INET; | 57 | t.src.l3num = AF_INET; |
66 | t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; | 58 | t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; |
@@ -69,7 +61,7 @@ static void pptp_nat_expected(struct nf_conn *ct, | |||
69 | t.dst.u.gre.key = ct_pptp_info->pns_call_id; | 61 | t.dst.u.gre.key = ct_pptp_info->pns_call_id; |
70 | t.dst.protonum = IPPROTO_GRE; | 62 | t.dst.protonum = IPPROTO_GRE; |
71 | } else { | 63 | } else { |
72 | DEBUGP("we are PAC->PNS\n"); | 64 | pr_debug("we are PAC->PNS\n"); |
73 | /* build tuple for PNS->PAC */ | 65 | /* build tuple for PNS->PAC */ |
74 | t.src.l3num = AF_INET; | 66 | t.src.l3num = AF_INET; |
75 | t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; | 67 | t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; |
@@ -79,15 +71,15 @@ static void pptp_nat_expected(struct nf_conn *ct, | |||
79 | t.dst.protonum = IPPROTO_GRE; | 71 | t.dst.protonum = IPPROTO_GRE; |
80 | } | 72 | } |
81 | 73 | ||
82 | DEBUGP("trying to unexpect other dir: "); | 74 | pr_debug("trying to unexpect other dir: "); |
83 | NF_CT_DUMP_TUPLE(&t); | 75 | NF_CT_DUMP_TUPLE(&t); |
84 | other_exp = nf_conntrack_expect_find_get(&t); | 76 | other_exp = nf_ct_expect_find_get(&t); |
85 | if (other_exp) { | 77 | if (other_exp) { |
86 | nf_conntrack_unexpect_related(other_exp); | 78 | nf_ct_unexpect_related(other_exp); |
87 | nf_conntrack_expect_put(other_exp); | 79 | nf_ct_expect_put(other_exp); |
88 | DEBUGP("success\n"); | 80 | pr_debug("success\n"); |
89 | } else { | 81 | } else { |
90 | DEBUGP("not found!\n"); | 82 | pr_debug("not found!\n"); |
91 | } | 83 | } |
92 | 84 | ||
93 | /* This must be a fresh one. */ | 85 | /* This must be a fresh one. */ |
@@ -161,9 +153,9 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
161 | cid_off = offsetof(union pptp_ctrl_union, clrreq.callID); | 153 | cid_off = offsetof(union pptp_ctrl_union, clrreq.callID); |
162 | break; | 154 | break; |
163 | default: | 155 | default: |
164 | DEBUGP("unknown outbound packet 0x%04x:%s\n", msg, | 156 | pr_debug("unknown outbound packet 0x%04x:%s\n", msg, |
165 | (msg <= PPTP_MSG_MAX)? | 157 | msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : |
166 | pptp_msg_name[msg]:pptp_msg_name[0]); | 158 | pptp_msg_name[0]); |
167 | /* fall through */ | 159 | /* fall through */ |
168 | case PPTP_SET_LINK_INFO: | 160 | case PPTP_SET_LINK_INFO: |
169 | /* only need to NAT in case PAC is behind NAT box */ | 161 | /* only need to NAT in case PAC is behind NAT box */ |
@@ -179,8 +171,8 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
179 | 171 | ||
180 | /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass | 172 | /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass |
181 | * down to here */ | 173 | * down to here */ |
182 | DEBUGP("altering call id from 0x%04x to 0x%04x\n", | 174 | pr_debug("altering call id from 0x%04x to 0x%04x\n", |
183 | ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); | 175 | ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); |
184 | 176 | ||
185 | /* mangle packet */ | 177 | /* mangle packet */ |
186 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 178 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, |
@@ -255,8 +247,9 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
255 | pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); | 247 | pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); |
256 | break; | 248 | break; |
257 | default: | 249 | default: |
258 | DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)? | 250 | pr_debug("unknown inbound packet %s\n", |
259 | pptp_msg_name[msg]:pptp_msg_name[0]); | 251 | msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : |
252 | pptp_msg_name[0]); | ||
260 | /* fall through */ | 253 | /* fall through */ |
261 | case PPTP_START_SESSION_REQUEST: | 254 | case PPTP_START_SESSION_REQUEST: |
262 | case PPTP_START_SESSION_REPLY: | 255 | case PPTP_START_SESSION_REPLY: |
@@ -272,8 +265,8 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
272 | * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ | 265 | * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ |
273 | 266 | ||
274 | /* mangle packet */ | 267 | /* mangle packet */ |
275 | DEBUGP("altering peer call id from 0x%04x to 0x%04x\n", | 268 | pr_debug("altering peer call id from 0x%04x to 0x%04x\n", |
276 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); | 269 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); |
277 | 270 | ||
278 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 271 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, |
279 | pcid_off + sizeof(struct pptp_pkt_hdr) + | 272 | pcid_off + sizeof(struct pptp_pkt_hdr) + |
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c index c3908bc5a709..2e40cc83526a 100644 --- a/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c | |||
@@ -36,13 +36,6 @@ MODULE_LICENSE("GPL"); | |||
36 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | 36 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); |
37 | MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); | 37 | MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); |
38 | 38 | ||
39 | #if 0 | ||
40 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ | ||
41 | __FUNCTION__, ## args) | ||
42 | #else | ||
43 | #define DEBUGP(x, args...) | ||
44 | #endif | ||
45 | |||
46 | /* is key in given range between min and max */ | 39 | /* is key in given range between min and max */ |
47 | static int | 40 | static int |
48 | gre_in_range(const struct nf_conntrack_tuple *tuple, | 41 | gre_in_range(const struct nf_conntrack_tuple *tuple, |
@@ -83,7 +76,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
83 | keyptr = &tuple->dst.u.gre.key; | 76 | keyptr = &tuple->dst.u.gre.key; |
84 | 77 | ||
85 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { | 78 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { |
86 | DEBUGP("%p: NATing GRE PPTP\n", conntrack); | 79 | pr_debug("%p: NATing GRE PPTP\n", conntrack); |
87 | min = 1; | 80 | min = 1; |
88 | range_size = 0xffff; | 81 | range_size = 0xffff; |
89 | } else { | 82 | } else { |
@@ -91,7 +84,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
91 | range_size = ntohs(range->max.gre.key) - min + 1; | 84 | range_size = ntohs(range->max.gre.key) - min + 1; |
92 | } | 85 | } |
93 | 86 | ||
94 | DEBUGP("min = %u, range_size = %u\n", min, range_size); | 87 | pr_debug("min = %u, range_size = %u\n", min, range_size); |
95 | 88 | ||
96 | for (i = 0; i < range_size; i++, key++) { | 89 | for (i = 0; i < range_size; i++, key++) { |
97 | *keyptr = htons(min + key % range_size); | 90 | *keyptr = htons(min + key % range_size); |
@@ -99,7 +92,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
99 | return 1; | 92 | return 1; |
100 | } | 93 | } |
101 | 94 | ||
102 | DEBUGP("%p: no NAT mapping\n", conntrack); | 95 | pr_debug("%p: no NAT mapping\n", conntrack); |
103 | return 0; | 96 | return 0; |
104 | } | 97 | } |
105 | 98 | ||
@@ -132,11 +125,11 @@ gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff, | |||
132 | * Try to behave like "nf_nat_proto_unknown" */ | 125 | * Try to behave like "nf_nat_proto_unknown" */ |
133 | break; | 126 | break; |
134 | case GRE_VERSION_PPTP: | 127 | case GRE_VERSION_PPTP: |
135 | DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); | 128 | pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); |
136 | pgreh->call_id = tuple->dst.u.gre.key; | 129 | pgreh->call_id = tuple->dst.u.gre.key; |
137 | break; | 130 | break; |
138 | default: | 131 | default: |
139 | DEBUGP("can't nat unknown GRE version\n"); | 132 | pr_debug("can't nat unknown GRE version\n"); |
140 | return 0; | 133 | return 0; |
141 | } | 134 | } |
142 | return 1; | 135 | return 1; |
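
gre_unique_tuple() above searches for a free PPTP call-id the same way the port-based protocols search for a free port: it walks at most range_size candidates of the form min + key % range_size, starting from the tuple's current key, and reports failure if none is unused. A userspace sketch of that walk; is_free_stub() stands in for the kernel's uniqueness check and the numbers are invented:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the in-kernel "is this tuple already taken?" check. */
static int is_free_stub(uint16_t candidate)
{
	return candidate == 1005;	/* pretend only this call-id is free */
}

/* Mirrors the loop in gre_unique_tuple(): at most range_size candidates,
 * generated as min + key % range_size from the current key upward. */
static int pick_key(uint16_t key, uint16_t min, uint16_t range_size,
		    uint16_t *chosen)
{
	unsigned int i;

	for (i = 0; i < range_size; i++, key++) {
		uint16_t candidate = min + key % range_size;

		if (is_free_stub(candidate)) {
			*chosen = candidate;
			return 1;
		}
	}
	return 0;			/* no NAT mapping available */
}

int main(void)
{
	uint16_t chosen;

	if (pick_key(1003, 1000, 10, &chosen))
		printf("picked call-id %u\n", chosen);
	return 0;
}
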
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 6740736c5e79..0f45427e5fdc 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -24,12 +24,6 @@ | |||
24 | #include <net/netfilter/nf_nat_core.h> | 24 | #include <net/netfilter/nf_nat_core.h> |
25 | #include <net/netfilter/nf_nat_rule.h> | 25 | #include <net/netfilter/nf_nat_rule.h> |
26 | 26 | ||
27 | #if 0 | ||
28 | #define DEBUGP printk | ||
29 | #else | ||
30 | #define DEBUGP(format, args...) | ||
31 | #endif | ||
32 | |||
33 | #define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT)) | 27 | #define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT)) |
34 | 28 | ||
35 | static struct | 29 | static struct |
@@ -140,39 +134,39 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb, | |||
140 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); | 134 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); |
141 | } | 135 | } |
142 | 136 | ||
143 | static int ipt_snat_checkentry(const char *tablename, | 137 | static bool ipt_snat_checkentry(const char *tablename, |
144 | const void *entry, | 138 | const void *entry, |
145 | const struct xt_target *target, | 139 | const struct xt_target *target, |
146 | void *targinfo, | 140 | void *targinfo, |
147 | unsigned int hook_mask) | 141 | unsigned int hook_mask) |
148 | { | 142 | { |
149 | struct nf_nat_multi_range_compat *mr = targinfo; | 143 | struct nf_nat_multi_range_compat *mr = targinfo; |
150 | 144 | ||
151 | /* Must be a valid range */ | 145 | /* Must be a valid range */ |
152 | if (mr->rangesize != 1) { | 146 | if (mr->rangesize != 1) { |
153 | printk("SNAT: multiple ranges no longer supported\n"); | 147 | printk("SNAT: multiple ranges no longer supported\n"); |
154 | return 0; | 148 | return false; |
155 | } | 149 | } |
156 | return 1; | 150 | return true; |
157 | } | 151 | } |
158 | 152 | ||
159 | static int ipt_dnat_checkentry(const char *tablename, | 153 | static bool ipt_dnat_checkentry(const char *tablename, |
160 | const void *entry, | 154 | const void *entry, |
161 | const struct xt_target *target, | 155 | const struct xt_target *target, |
162 | void *targinfo, | 156 | void *targinfo, |
163 | unsigned int hook_mask) | 157 | unsigned int hook_mask) |
164 | { | 158 | { |
165 | struct nf_nat_multi_range_compat *mr = targinfo; | 159 | struct nf_nat_multi_range_compat *mr = targinfo; |
166 | 160 | ||
167 | /* Must be a valid range */ | 161 | /* Must be a valid range */ |
168 | if (mr->rangesize != 1) { | 162 | if (mr->rangesize != 1) { |
169 | printk("DNAT: multiple ranges no longer supported\n"); | 163 | printk("DNAT: multiple ranges no longer supported\n"); |
170 | return 0; | 164 | return false; |
171 | } | 165 | } |
172 | return 1; | 166 | return true; |
173 | } | 167 | } |
174 | 168 | ||
175 | inline unsigned int | 169 | unsigned int |
176 | alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) | 170 | alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) |
177 | { | 171 | { |
178 | /* Force range to this IP; let proto decide mapping for | 172 | /* Force range to this IP; let proto decide mapping for |
@@ -186,8 +180,8 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) | |||
186 | struct nf_nat_range range | 180 | struct nf_nat_range range |
187 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; | 181 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; |
188 | 182 | ||
189 | DEBUGP("Allocating NULL binding for %p (%u.%u.%u.%u)\n", | 183 | pr_debug("Allocating NULL binding for %p (%u.%u.%u.%u)\n", |
190 | ct, NIPQUAD(ip)); | 184 | ct, NIPQUAD(ip)); |
191 | return nf_nat_setup_info(ct, &range, hooknum); | 185 | return nf_nat_setup_info(ct, &range, hooknum); |
192 | } | 186 | } |
193 | 187 | ||
@@ -205,8 +199,8 @@ alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum) | |||
205 | struct nf_nat_range range | 199 | struct nf_nat_range range |
206 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } }; | 200 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } }; |
207 | 201 | ||
208 | DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n", | 202 | pr_debug("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n", |
209 | ct, NIPQUAD(ip)); | 203 | ct, NIPQUAD(ip)); |
210 | return nf_nat_setup_info(ct, &range, hooknum); | 204 | return nf_nat_setup_info(ct, &range, hooknum); |
211 | } | 205 | } |
212 | 206 | ||
@@ -228,7 +222,7 @@ int nf_nat_rule_find(struct sk_buff **pskb, | |||
228 | return ret; | 222 | return ret; |
229 | } | 223 | } |
230 | 224 | ||
231 | static struct xt_target ipt_snat_reg = { | 225 | static struct xt_target ipt_snat_reg __read_mostly = { |
232 | .name = "SNAT", | 226 | .name = "SNAT", |
233 | .target = ipt_snat_target, | 227 | .target = ipt_snat_target, |
234 | .targetsize = sizeof(struct nf_nat_multi_range_compat), | 228 | .targetsize = sizeof(struct nf_nat_multi_range_compat), |
@@ -238,7 +232,7 @@ static struct xt_target ipt_snat_reg = { | |||
238 | .family = AF_INET, | 232 | .family = AF_INET, |
239 | }; | 233 | }; |
240 | 234 | ||
241 | static struct xt_target ipt_dnat_reg = { | 235 | static struct xt_target ipt_dnat_reg __read_mostly = { |
242 | .name = "DNAT", | 236 | .name = "DNAT", |
243 | .target = ipt_dnat_target, | 237 | .target = ipt_dnat_target, |
244 | .targetsize = sizeof(struct nf_nat_multi_range_compat), | 238 | .targetsize = sizeof(struct nf_nat_multi_range_compat), |
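Note on the nf_nat_rule.c hunks above: the SNAT/DNAT checkentry callbacks now return bool instead of int, the target registrations gain __read_mostly, and alloc_null_binding loses its inline. A minimal sketch of the resulting checkentry contract, mirroring the range check shown above (illustration only, not code from this commit):

static bool dummy_nat_checkentry(const char *tablename, const void *entry,
				 const struct xt_target *target, void *targinfo,
				 unsigned int hook_mask)
{
	const struct nf_nat_multi_range_compat *mr = targinfo;

	/* exactly one range is supported; return false to reject the rule */
	return mr->rangesize == 1;
}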
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index fac97cf51ae5..a889ec3ec83a 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -26,12 +26,6 @@ MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>"); | |||
26 | MODULE_DESCRIPTION("SIP NAT helper"); | 26 | MODULE_DESCRIPTION("SIP NAT helper"); |
27 | MODULE_ALIAS("ip_nat_sip"); | 27 | MODULE_ALIAS("ip_nat_sip"); |
28 | 28 | ||
29 | #if 0 | ||
30 | #define DEBUGP printk | ||
31 | #else | ||
32 | #define DEBUGP(format, args...) | ||
33 | #endif | ||
34 | |||
35 | struct addr_map { | 29 | struct addr_map { |
36 | struct { | 30 | struct { |
37 | char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | 31 | char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; |
@@ -257,10 +251,12 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb, | |||
257 | __be32 newip; | 251 | __be32 newip; |
258 | u_int16_t port; | 252 | u_int16_t port; |
259 | 253 | ||
260 | DEBUGP("ip_nat_sdp():\n"); | ||
261 | |||
262 | /* Connection will come from reply */ | 254 | /* Connection will come from reply */ |
263 | newip = ct->tuplehash[!dir].tuple.dst.u3.ip; | 255 | if (ct->tuplehash[dir].tuple.src.u3.ip == |
256 | ct->tuplehash[!dir].tuple.dst.u3.ip) | ||
257 | newip = exp->tuple.dst.u3.ip; | ||
258 | else | ||
259 | newip = ct->tuplehash[!dir].tuple.dst.u3.ip; | ||
264 | 260 | ||
265 | exp->saved_ip = exp->tuple.dst.u3.ip; | 261 | exp->saved_ip = exp->tuple.dst.u3.ip; |
266 | exp->tuple.dst.u3.ip = newip; | 262 | exp->tuple.dst.u3.ip = newip; |
@@ -274,7 +270,7 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb, | |||
274 | /* Try to get same port: if not, try to change it. */ | 270 | /* Try to get same port: if not, try to change it. */ |
275 | for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) { | 271 | for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) { |
276 | exp->tuple.dst.u.udp.port = htons(port); | 272 | exp->tuple.dst.u.udp.port = htons(port); |
277 | if (nf_conntrack_expect_related(exp) == 0) | 273 | if (nf_ct_expect_related(exp) == 0) |
278 | break; | 274 | break; |
279 | } | 275 | } |
280 | 276 | ||
@@ -282,7 +278,7 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb, | |||
282 | return NF_DROP; | 278 | return NF_DROP; |
283 | 279 | ||
284 | if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) { | 280 | if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) { |
285 | nf_conntrack_unexpect_related(exp); | 281 | nf_ct_unexpect_related(exp); |
286 | return NF_DROP; | 282 | return NF_DROP; |
287 | } | 283 | } |
288 | return NF_ACCEPT; | 284 | return NF_ACCEPT; |
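The ip_nat_sdp() hunk changes how the NATed address for the expected media stream is picked: when the original-direction source already equals the reply-direction destination (that address was not rewritten by NAT), the expectation's existing destination is kept; otherwise the reply destination is used as before. The same file also follows the expectation API rename (nf_conntrack_expect_related/nf_ct_unexpect_related become nf_ct_expect_related/nf_ct_unexpect_related). The selection logic, restated as a hedged standalone sketch:

static __be32 pick_sdp_newip(const struct nf_conn *ct,
			     const struct nf_conntrack_expect *exp,
			     enum ip_conntrack_dir dir)
{
	/* source in this direction equals the reply destination:
	 * keep the expectation's own destination address */
	if (ct->tuplehash[dir].tuple.src.u3.ip ==
	    ct->tuplehash[!dir].tuple.dst.u3.ip)
		return exp->tuple.dst.u3.ip;
	/* otherwise the media connection will come from the reply destination */
	return ct->tuplehash[!dir].tuple.dst.u3.ip;
}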
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 6e88505d6162..6bfcd3a90f08 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -1276,9 +1276,6 @@ static struct nf_conntrack_helper snmp_helper __read_mostly = { | |||
1276 | .tuple.src.l3num = AF_INET, | 1276 | .tuple.src.l3num = AF_INET, |
1277 | .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), | 1277 | .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), |
1278 | .tuple.dst.protonum = IPPROTO_UDP, | 1278 | .tuple.dst.protonum = IPPROTO_UDP, |
1279 | .mask.src.l3num = 0xFFFF, | ||
1280 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
1281 | .mask.dst.protonum = 0xFF, | ||
1282 | }; | 1279 | }; |
1283 | 1280 | ||
1284 | static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { | 1281 | static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { |
@@ -1290,9 +1287,6 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { | |||
1290 | .tuple.src.l3num = AF_INET, | 1287 | .tuple.src.l3num = AF_INET, |
1291 | .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), | 1288 | .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), |
1292 | .tuple.dst.protonum = IPPROTO_UDP, | 1289 | .tuple.dst.protonum = IPPROTO_UDP, |
1293 | .mask.src.l3num = 0xFFFF, | ||
1294 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
1295 | .mask.dst.protonum = 0xFF, | ||
1296 | }; | 1290 | }; |
1297 | 1291 | ||
1298 | /***************************************************************************** | 1292 | /***************************************************************************** |
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 55dac36dbc85..332814dac503 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <net/netfilter/nf_conntrack.h> | 20 | #include <net/netfilter/nf_conntrack.h> |
21 | #include <net/netfilter/nf_conntrack_core.h> | 21 | #include <net/netfilter/nf_conntrack_core.h> |
22 | #include <net/netfilter/nf_conntrack_extend.h> | ||
22 | #include <net/netfilter/nf_nat.h> | 23 | #include <net/netfilter/nf_nat.h> |
23 | #include <net/netfilter/nf_nat_rule.h> | 24 | #include <net/netfilter/nf_nat_rule.h> |
24 | #include <net/netfilter/nf_nat_protocol.h> | 25 | #include <net/netfilter/nf_nat_protocol.h> |
@@ -26,12 +27,6 @@ | |||
26 | #include <net/netfilter/nf_nat_helper.h> | 27 | #include <net/netfilter/nf_nat_helper.h> |
27 | #include <linux/netfilter_ipv4/ip_tables.h> | 28 | #include <linux/netfilter_ipv4/ip_tables.h> |
28 | 29 | ||
29 | #if 0 | ||
30 | #define DEBUGP printk | ||
31 | #else | ||
32 | #define DEBUGP(format, args...) | ||
33 | #endif | ||
34 | |||
35 | #ifdef CONFIG_XFRM | 30 | #ifdef CONFIG_XFRM |
36 | static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) | 31 | static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) |
37 | { | 32 | { |
@@ -113,8 +108,13 @@ nf_nat_fn(unsigned int hooknum, | |||
113 | return NF_ACCEPT; | 108 | return NF_ACCEPT; |
114 | 109 | ||
115 | nat = nfct_nat(ct); | 110 | nat = nfct_nat(ct); |
116 | if (!nat) | 111 | if (!nat) { |
117 | return NF_ACCEPT; | 112 | nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); |
113 | if (nat == NULL) { | ||
114 | pr_debug("failed to add NAT extension\n"); | ||
115 | return NF_ACCEPT; | ||
116 | } | ||
117 | } | ||
118 | 118 | ||
119 | switch (ctinfo) { | 119 | switch (ctinfo) { |
120 | case IP_CT_RELATED: | 120 | case IP_CT_RELATED: |
@@ -148,9 +148,9 @@ nf_nat_fn(unsigned int hooknum, | |||
148 | return ret; | 148 | return ret; |
149 | } | 149 | } |
150 | } else | 150 | } else |
151 | DEBUGP("Already setup manip %s for ct %p\n", | 151 | pr_debug("Already setup manip %s for ct %p\n", |
152 | maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", | 152 | maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", |
153 | ct); | 153 | ct); |
154 | break; | 154 | break; |
155 | 155 | ||
156 | default: | 156 | default: |
@@ -264,7 +264,7 @@ nf_nat_adjust(unsigned int hooknum, | |||
264 | 264 | ||
265 | ct = nf_ct_get(*pskb, &ctinfo); | 265 | ct = nf_ct_get(*pskb, &ctinfo); |
266 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | 266 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { |
267 | DEBUGP("nf_nat_standalone: adjusting sequence number\n"); | 267 | pr_debug("nf_nat_standalone: adjusting sequence number\n"); |
268 | if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) | 268 | if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) |
269 | return NF_DROP; | 269 | return NF_DROP; |
270 | } | 270 | } |
@@ -326,26 +326,10 @@ static struct nf_hook_ops nf_nat_ops[] = { | |||
326 | 326 | ||
327 | static int __init nf_nat_standalone_init(void) | 327 | static int __init nf_nat_standalone_init(void) |
328 | { | 328 | { |
329 | int size, ret = 0; | 329 | int ret = 0; |
330 | 330 | ||
331 | need_conntrack(); | 331 | need_conntrack(); |
332 | 332 | ||
333 | size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_nat)) + | ||
334 | sizeof(struct nf_conn_nat); | ||
335 | ret = nf_conntrack_register_cache(NF_CT_F_NAT, "nf_nat:base", size); | ||
336 | if (ret < 0) { | ||
337 | printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n"); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | size = ALIGN(size, __alignof__(struct nf_conn_help)) + | ||
342 | sizeof(struct nf_conn_help); | ||
343 | ret = nf_conntrack_register_cache(NF_CT_F_NAT|NF_CT_F_HELP, | ||
344 | "nf_nat:help", size); | ||
345 | if (ret < 0) { | ||
346 | printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n"); | ||
347 | goto cleanup_register_cache; | ||
348 | } | ||
349 | #ifdef CONFIG_XFRM | 333 | #ifdef CONFIG_XFRM |
350 | BUG_ON(ip_nat_decode_session != NULL); | 334 | BUG_ON(ip_nat_decode_session != NULL); |
351 | ip_nat_decode_session = nat_decode_session; | 335 | ip_nat_decode_session = nat_decode_session; |
@@ -360,7 +344,6 @@ static int __init nf_nat_standalone_init(void) | |||
360 | printk("nf_nat_init: can't register hooks.\n"); | 344 | printk("nf_nat_init: can't register hooks.\n"); |
361 | goto cleanup_rule_init; | 345 | goto cleanup_rule_init; |
362 | } | 346 | } |
363 | nf_nat_module_is_loaded = 1; | ||
364 | return ret; | 347 | return ret; |
365 | 348 | ||
366 | cleanup_rule_init: | 349 | cleanup_rule_init: |
@@ -370,9 +353,6 @@ static int __init nf_nat_standalone_init(void) | |||
370 | ip_nat_decode_session = NULL; | 353 | ip_nat_decode_session = NULL; |
371 | synchronize_net(); | 354 | synchronize_net(); |
372 | #endif | 355 | #endif |
373 | nf_conntrack_unregister_cache(NF_CT_F_NAT|NF_CT_F_HELP); | ||
374 | cleanup_register_cache: | ||
375 | nf_conntrack_unregister_cache(NF_CT_F_NAT); | ||
376 | return ret; | 356 | return ret; |
377 | } | 357 | } |
378 | 358 | ||
@@ -380,7 +360,6 @@ static void __exit nf_nat_standalone_fini(void) | |||
380 | { | 360 | { |
381 | nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); | 361 | nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); |
382 | nf_nat_rule_cleanup(); | 362 | nf_nat_rule_cleanup(); |
383 | nf_nat_module_is_loaded = 0; | ||
384 | #ifdef CONFIG_XFRM | 363 | #ifdef CONFIG_XFRM |
385 | ip_nat_decode_session = NULL; | 364 | ip_nat_decode_session = NULL; |
386 | synchronize_net(); | 365 | synchronize_net(); |
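Across nf_nat_standalone.c the per-combination conntrack slab caches ("nf_nat:base", "nf_nat:help") disappear: NAT state is now a conntrack extension attached on first use inside nf_nat_fn(), which is why the init and exit paths shrink so much. The pattern, restated as a hedged sketch of the hunk above:

	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat == NULL) {
		/* first NAT decision for this connection: attach the
		 * extension now instead of baking it into the conntrack
		 * slab size at module load time */
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL)
			return NF_ACCEPT;	/* no memory: pass the packet un-NATed */
	}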
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c index 2566b79de224..04dfeaefec02 100644 --- a/net/ipv4/netfilter/nf_nat_tftp.c +++ b/net/ipv4/netfilter/nf_nat_tftp.c | |||
@@ -30,7 +30,7 @@ static unsigned int help(struct sk_buff **pskb, | |||
30 | = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; | 30 | = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; |
31 | exp->dir = IP_CT_DIR_REPLY; | 31 | exp->dir = IP_CT_DIR_REPLY; |
32 | exp->expectfn = nf_nat_follow_master; | 32 | exp->expectfn = nf_nat_follow_master; |
33 | if (nf_conntrack_expect_related(exp) != 0) | 33 | if (nf_ct_expect_related(exp) != 0) |
34 | return NF_DROP; | 34 | return NF_DROP; |
35 | return NF_ACCEPT; | 35 | return NF_ACCEPT; |
36 | } | 36 | } |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 29ca63e81ced..88fa648d7ba3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -101,7 +101,6 @@ | |||
101 | #include <net/tcp.h> | 101 | #include <net/tcp.h> |
102 | #include <net/icmp.h> | 102 | #include <net/icmp.h> |
103 | #include <net/xfrm.h> | 103 | #include <net/xfrm.h> |
104 | #include <net/ip_mp_alg.h> | ||
105 | #include <net/netevent.h> | 104 | #include <net/netevent.h> |
106 | #include <net/rtnetlink.h> | 105 | #include <net/rtnetlink.h> |
107 | #ifdef CONFIG_SYSCTL | 106 | #ifdef CONFIG_SYSCTL |
@@ -168,7 +167,7 @@ static struct dst_ops ipv4_dst_ops = { | |||
168 | 167 | ||
169 | #define ECN_OR_COST(class) TC_PRIO_##class | 168 | #define ECN_OR_COST(class) TC_PRIO_##class |
170 | 169 | ||
171 | __u8 ip_tos2prio[16] = { | 170 | const __u8 ip_tos2prio[16] = { |
172 | TC_PRIO_BESTEFFORT, | 171 | TC_PRIO_BESTEFFORT, |
173 | ECN_OR_COST(FILLER), | 172 | ECN_OR_COST(FILLER), |
174 | TC_PRIO_BESTEFFORT, | 173 | TC_PRIO_BESTEFFORT, |
@@ -495,13 +494,11 @@ static const struct file_operations rt_cpu_seq_fops = { | |||
495 | 494 | ||
496 | static __inline__ void rt_free(struct rtable *rt) | 495 | static __inline__ void rt_free(struct rtable *rt) |
497 | { | 496 | { |
498 | multipath_remove(rt); | ||
499 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); | 497 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); |
500 | } | 498 | } |
501 | 499 | ||
502 | static __inline__ void rt_drop(struct rtable *rt) | 500 | static __inline__ void rt_drop(struct rtable *rt) |
503 | { | 501 | { |
504 | multipath_remove(rt); | ||
505 | ip_rt_put(rt); | 502 | ip_rt_put(rt); |
506 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); | 503 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); |
507 | } | 504 | } |
@@ -574,52 +571,6 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | |||
574 | (fl1->iif ^ fl2->iif)) == 0; | 571 | (fl1->iif ^ fl2->iif)) == 0; |
575 | } | 572 | } |
576 | 573 | ||
577 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
578 | static struct rtable **rt_remove_balanced_route(struct rtable **chain_head, | ||
579 | struct rtable *expentry, | ||
580 | int *removed_count) | ||
581 | { | ||
582 | int passedexpired = 0; | ||
583 | struct rtable **nextstep = NULL; | ||
584 | struct rtable **rthp = chain_head; | ||
585 | struct rtable *rth; | ||
586 | |||
587 | if (removed_count) | ||
588 | *removed_count = 0; | ||
589 | |||
590 | while ((rth = *rthp) != NULL) { | ||
591 | if (rth == expentry) | ||
592 | passedexpired = 1; | ||
593 | |||
594 | if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 && | ||
595 | compare_keys(&(*rthp)->fl, &expentry->fl)) { | ||
596 | if (*rthp == expentry) { | ||
597 | *rthp = rth->u.dst.rt_next; | ||
598 | continue; | ||
599 | } else { | ||
600 | *rthp = rth->u.dst.rt_next; | ||
601 | rt_free(rth); | ||
602 | if (removed_count) | ||
603 | ++(*removed_count); | ||
604 | } | ||
605 | } else { | ||
606 | if (!((*rthp)->u.dst.flags & DST_BALANCED) && | ||
607 | passedexpired && !nextstep) | ||
608 | nextstep = &rth->u.dst.rt_next; | ||
609 | |||
610 | rthp = &rth->u.dst.rt_next; | ||
611 | } | ||
612 | } | ||
613 | |||
614 | rt_free(expentry); | ||
615 | if (removed_count) | ||
616 | ++(*removed_count); | ||
617 | |||
618 | return nextstep; | ||
619 | } | ||
620 | #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
621 | |||
622 | |||
623 | /* This runs via a timer and thus is always in BH context. */ | 574 | /* This runs via a timer and thus is always in BH context. */ |
624 | static void rt_check_expire(unsigned long dummy) | 575 | static void rt_check_expire(unsigned long dummy) |
625 | { | 576 | { |
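The removal above deletes rt_remove_balanced_route(), which unlinked every cached route sharing a flow key with an expired DST_BALANCED entry; with CONFIG_IP_ROUTE_MULTIPATH_CACHED gone, expiry reverts to a plain single-entry unlink (*rthp = rth->u.dst.rt_next; rt_free(rth)). For readers unfamiliar with the idiom the removed helper was built on, here is a self-contained sketch of the pointer-to-pointer list walk it used (generic C, not kernel code):

#include <stdlib.h>

struct node {
	struct node *next;
	int key;
};

/* remove every node matching key without keeping a "prev" pointer:
 * pp always points at the link that reaches the current node */
static void remove_matching(struct node **head, int key)
{
	struct node **pp = head;
	struct node *n;

	while ((n = *pp) != NULL) {
		if (n->key == key) {
			*pp = n->next;	/* unlink in place */
			free(n);
		} else {
			pp = &n->next;	/* advance past a kept node */
		}
	}
}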
@@ -658,22 +609,8 @@ static void rt_check_expire(unsigned long dummy) | |||
658 | } | 609 | } |
659 | 610 | ||
660 | /* Cleanup aged off entries. */ | 611 | /* Cleanup aged off entries. */ |
661 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
662 | /* remove all related balanced entries if necessary */ | ||
663 | if (rth->u.dst.flags & DST_BALANCED) { | ||
664 | rthp = rt_remove_balanced_route( | ||
665 | &rt_hash_table[i].chain, | ||
666 | rth, NULL); | ||
667 | if (!rthp) | ||
668 | break; | ||
669 | } else { | ||
670 | *rthp = rth->u.dst.rt_next; | ||
671 | rt_free(rth); | ||
672 | } | ||
673 | #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
674 | *rthp = rth->u.dst.rt_next; | 612 | *rthp = rth->u.dst.rt_next; |
675 | rt_free(rth); | 613 | rt_free(rth); |
676 | #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
677 | } | 614 | } |
678 | spin_unlock(rt_hash_lock_addr(i)); | 615 | spin_unlock(rt_hash_lock_addr(i)); |
679 | 616 | ||
@@ -721,9 +658,6 @@ void rt_cache_flush(int delay) | |||
721 | if (delay < 0) | 658 | if (delay < 0) |
722 | delay = ip_rt_min_delay; | 659 | delay = ip_rt_min_delay; |
723 | 660 | ||
724 | /* flush existing multipath state*/ | ||
725 | multipath_flush(); | ||
726 | |||
727 | spin_lock_bh(&rt_flush_lock); | 661 | spin_lock_bh(&rt_flush_lock); |
728 | 662 | ||
729 | if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) { | 663 | if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) { |
@@ -842,30 +776,9 @@ static int rt_garbage_collect(void) | |||
842 | rthp = &rth->u.dst.rt_next; | 776 | rthp = &rth->u.dst.rt_next; |
843 | continue; | 777 | continue; |
844 | } | 778 | } |
845 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
846 | /* remove all related balanced entries | ||
847 | * if necessary | ||
848 | */ | ||
849 | if (rth->u.dst.flags & DST_BALANCED) { | ||
850 | int r; | ||
851 | |||
852 | rthp = rt_remove_balanced_route( | ||
853 | &rt_hash_table[k].chain, | ||
854 | rth, | ||
855 | &r); | ||
856 | goal -= r; | ||
857 | if (!rthp) | ||
858 | break; | ||
859 | } else { | ||
860 | *rthp = rth->u.dst.rt_next; | ||
861 | rt_free(rth); | ||
862 | goal--; | ||
863 | } | ||
864 | #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
865 | *rthp = rth->u.dst.rt_next; | 779 | *rthp = rth->u.dst.rt_next; |
866 | rt_free(rth); | 780 | rt_free(rth); |
867 | goal--; | 781 | goal--; |
868 | #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
869 | } | 782 | } |
870 | spin_unlock_bh(rt_hash_lock_addr(k)); | 783 | spin_unlock_bh(rt_hash_lock_addr(k)); |
871 | if (goal <= 0) | 784 | if (goal <= 0) |
@@ -939,12 +852,7 @@ restart: | |||
939 | 852 | ||
940 | spin_lock_bh(rt_hash_lock_addr(hash)); | 853 | spin_lock_bh(rt_hash_lock_addr(hash)); |
941 | while ((rth = *rthp) != NULL) { | 854 | while ((rth = *rthp) != NULL) { |
942 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
943 | if (!(rth->u.dst.flags & DST_BALANCED) && | ||
944 | compare_keys(&rth->fl, &rt->fl)) { | ||
945 | #else | ||
946 | if (compare_keys(&rth->fl, &rt->fl)) { | 855 | if (compare_keys(&rth->fl, &rt->fl)) { |
947 | #endif | ||
948 | /* Put it first */ | 856 | /* Put it first */ |
949 | *rthp = rth->u.dst.rt_next; | 857 | *rthp = rth->u.dst.rt_next; |
950 | /* | 858 | /* |
@@ -1774,10 +1682,6 @@ static inline int __mkroute_input(struct sk_buff *skb, | |||
1774 | 1682 | ||
1775 | atomic_set(&rth->u.dst.__refcnt, 1); | 1683 | atomic_set(&rth->u.dst.__refcnt, 1); |
1776 | rth->u.dst.flags= DST_HOST; | 1684 | rth->u.dst.flags= DST_HOST; |
1777 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
1778 | if (res->fi->fib_nhs > 1) | ||
1779 | rth->u.dst.flags |= DST_BALANCED; | ||
1780 | #endif | ||
1781 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) | 1685 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) |
1782 | rth->u.dst.flags |= DST_NOPOLICY; | 1686 | rth->u.dst.flags |= DST_NOPOLICY; |
1783 | if (IN_DEV_CONF_GET(out_dev, NOXFRM)) | 1687 | if (IN_DEV_CONF_GET(out_dev, NOXFRM)) |
@@ -1812,11 +1716,11 @@ static inline int __mkroute_input(struct sk_buff *skb, | |||
1812 | return err; | 1716 | return err; |
1813 | } | 1717 | } |
1814 | 1718 | ||
1815 | static inline int ip_mkroute_input_def(struct sk_buff *skb, | 1719 | static inline int ip_mkroute_input(struct sk_buff *skb, |
1816 | struct fib_result* res, | 1720 | struct fib_result* res, |
1817 | const struct flowi *fl, | 1721 | const struct flowi *fl, |
1818 | struct in_device *in_dev, | 1722 | struct in_device *in_dev, |
1819 | __be32 daddr, __be32 saddr, u32 tos) | 1723 | __be32 daddr, __be32 saddr, u32 tos) |
1820 | { | 1724 | { |
1821 | struct rtable* rth = NULL; | 1725 | struct rtable* rth = NULL; |
1822 | int err; | 1726 | int err; |
@@ -1837,63 +1741,6 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb, | |||
1837 | return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); | 1741 | return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); |
1838 | } | 1742 | } |
1839 | 1743 | ||
1840 | static inline int ip_mkroute_input(struct sk_buff *skb, | ||
1841 | struct fib_result* res, | ||
1842 | const struct flowi *fl, | ||
1843 | struct in_device *in_dev, | ||
1844 | __be32 daddr, __be32 saddr, u32 tos) | ||
1845 | { | ||
1846 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
1847 | struct rtable* rth = NULL, *rtres; | ||
1848 | unsigned char hop, hopcount; | ||
1849 | int err = -EINVAL; | ||
1850 | unsigned int hash; | ||
1851 | |||
1852 | if (res->fi) | ||
1853 | hopcount = res->fi->fib_nhs; | ||
1854 | else | ||
1855 | hopcount = 1; | ||
1856 | |||
1857 | /* distinguish between multipath and singlepath */ | ||
1858 | if (hopcount < 2) | ||
1859 | return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, | ||
1860 | saddr, tos); | ||
1861 | |||
1862 | /* add all alternatives to the routing cache */ | ||
1863 | for (hop = 0; hop < hopcount; hop++) { | ||
1864 | res->nh_sel = hop; | ||
1865 | |||
1866 | /* put reference to previous result */ | ||
1867 | if (hop) | ||
1868 | ip_rt_put(rtres); | ||
1869 | |||
1870 | /* create a routing cache entry */ | ||
1871 | err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, | ||
1872 | &rth); | ||
1873 | if (err) | ||
1874 | return err; | ||
1875 | |||
1876 | /* put it into the cache */ | ||
1877 | hash = rt_hash(daddr, saddr, fl->iif); | ||
1878 | err = rt_intern_hash(hash, rth, &rtres); | ||
1879 | if (err) | ||
1880 | return err; | ||
1881 | |||
1882 | /* forward hop information to multipath impl. */ | ||
1883 | multipath_set_nhinfo(rth, | ||
1884 | FIB_RES_NETWORK(*res), | ||
1885 | FIB_RES_NETMASK(*res), | ||
1886 | res->prefixlen, | ||
1887 | &FIB_RES_NH(*res)); | ||
1888 | } | ||
1889 | skb->dst = &rtres->u.dst; | ||
1890 | return err; | ||
1891 | #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
1892 | return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos); | ||
1893 | #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
1894 | } | ||
1895 | |||
1896 | |||
1897 | /* | 1744 | /* |
1898 | * NOTE. We drop all the packets that has local source | 1745 | * NOTE. We drop all the packets that has local source |
1899 | * addresses, because every properly looped back packet | 1746 | * addresses, because every properly looped back packet |
@@ -2211,13 +2058,6 @@ static inline int __mkroute_output(struct rtable **result, | |||
2211 | 2058 | ||
2212 | atomic_set(&rth->u.dst.__refcnt, 1); | 2059 | atomic_set(&rth->u.dst.__refcnt, 1); |
2213 | rth->u.dst.flags= DST_HOST; | 2060 | rth->u.dst.flags= DST_HOST; |
2214 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
2215 | if (res->fi) { | ||
2216 | rth->rt_multipath_alg = res->fi->fib_mp_alg; | ||
2217 | if (res->fi->fib_nhs > 1) | ||
2218 | rth->u.dst.flags |= DST_BALANCED; | ||
2219 | } | ||
2220 | #endif | ||
2221 | if (IN_DEV_CONF_GET(in_dev, NOXFRM)) | 2061 | if (IN_DEV_CONF_GET(in_dev, NOXFRM)) |
2222 | rth->u.dst.flags |= DST_NOXFRM; | 2062 | rth->u.dst.flags |= DST_NOXFRM; |
2223 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) | 2063 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) |
@@ -2277,12 +2117,12 @@ static inline int __mkroute_output(struct rtable **result, | |||
2277 | return err; | 2117 | return err; |
2278 | } | 2118 | } |
2279 | 2119 | ||
2280 | static inline int ip_mkroute_output_def(struct rtable **rp, | 2120 | static inline int ip_mkroute_output(struct rtable **rp, |
2281 | struct fib_result* res, | 2121 | struct fib_result* res, |
2282 | const struct flowi *fl, | 2122 | const struct flowi *fl, |
2283 | const struct flowi *oldflp, | 2123 | const struct flowi *oldflp, |
2284 | struct net_device *dev_out, | 2124 | struct net_device *dev_out, |
2285 | unsigned flags) | 2125 | unsigned flags) |
2286 | { | 2126 | { |
2287 | struct rtable *rth = NULL; | 2127 | struct rtable *rth = NULL; |
2288 | int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); | 2128 | int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); |
@@ -2295,68 +2135,6 @@ static inline int ip_mkroute_output_def(struct rtable **rp, | |||
2295 | return err; | 2135 | return err; |
2296 | } | 2136 | } |
2297 | 2137 | ||
2298 | static inline int ip_mkroute_output(struct rtable** rp, | ||
2299 | struct fib_result* res, | ||
2300 | const struct flowi *fl, | ||
2301 | const struct flowi *oldflp, | ||
2302 | struct net_device *dev_out, | ||
2303 | unsigned flags) | ||
2304 | { | ||
2305 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
2306 | unsigned char hop; | ||
2307 | unsigned hash; | ||
2308 | int err = -EINVAL; | ||
2309 | struct rtable *rth = NULL; | ||
2310 | |||
2311 | if (res->fi && res->fi->fib_nhs > 1) { | ||
2312 | unsigned char hopcount = res->fi->fib_nhs; | ||
2313 | |||
2314 | for (hop = 0; hop < hopcount; hop++) { | ||
2315 | struct net_device *dev2nexthop; | ||
2316 | |||
2317 | res->nh_sel = hop; | ||
2318 | |||
2319 | /* hold a work reference to the output device */ | ||
2320 | dev2nexthop = FIB_RES_DEV(*res); | ||
2321 | dev_hold(dev2nexthop); | ||
2322 | |||
2323 | /* put reference to previous result */ | ||
2324 | if (hop) | ||
2325 | ip_rt_put(*rp); | ||
2326 | |||
2327 | err = __mkroute_output(&rth, res, fl, oldflp, | ||
2328 | dev2nexthop, flags); | ||
2329 | |||
2330 | if (err != 0) | ||
2331 | goto cleanup; | ||
2332 | |||
2333 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, | ||
2334 | oldflp->oif); | ||
2335 | err = rt_intern_hash(hash, rth, rp); | ||
2336 | |||
2337 | /* forward hop information to multipath impl. */ | ||
2338 | multipath_set_nhinfo(rth, | ||
2339 | FIB_RES_NETWORK(*res), | ||
2340 | FIB_RES_NETMASK(*res), | ||
2341 | res->prefixlen, | ||
2342 | &FIB_RES_NH(*res)); | ||
2343 | cleanup: | ||
2344 | /* release work reference to output device */ | ||
2345 | dev_put(dev2nexthop); | ||
2346 | |||
2347 | if (err != 0) | ||
2348 | return err; | ||
2349 | } | ||
2350 | return err; | ||
2351 | } else { | ||
2352 | return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, | ||
2353 | flags); | ||
2354 | } | ||
2355 | #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | ||
2356 | return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags); | ||
2357 | #endif | ||
2358 | } | ||
2359 | |||
2360 | /* | 2138 | /* |
2361 | * Major route resolver routine. | 2139 | * Major route resolver routine. |
2362 | */ | 2140 | */ |
@@ -2570,17 +2348,6 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp) | |||
2570 | rth->fl.mark == flp->mark && | 2348 | rth->fl.mark == flp->mark && |
2571 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & | 2349 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & |
2572 | (IPTOS_RT_MASK | RTO_ONLINK))) { | 2350 | (IPTOS_RT_MASK | RTO_ONLINK))) { |
2573 | |||
2574 | /* check for multipath routes and choose one if | ||
2575 | * necessary | ||
2576 | */ | ||
2577 | if (multipath_select_route(flp, rth, rp)) { | ||
2578 | dst_hold(&(*rp)->u.dst); | ||
2579 | RT_CACHE_STAT_INC(out_hit); | ||
2580 | rcu_read_unlock_bh(); | ||
2581 | return 0; | ||
2582 | } | ||
2583 | |||
2584 | rth->u.dst.lastuse = jiffies; | 2351 | rth->u.dst.lastuse = jiffies; |
2585 | dst_hold(&rth->u.dst); | 2352 | dst_hold(&rth->u.dst); |
2586 | rth->u.dst.__use++; | 2353 | rth->u.dst.__use++; |
@@ -2729,10 +2496,6 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
2729 | if (rt->u.dst.tclassid) | 2496 | if (rt->u.dst.tclassid) |
2730 | NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); | 2497 | NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); |
2731 | #endif | 2498 | #endif |
2732 | #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED | ||
2733 | if (rt->rt_multipath_alg != IP_MP_ALG_NONE) | ||
2734 | NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg); | ||
2735 | #endif | ||
2736 | if (rt->fl.iif) | 2499 | if (rt->fl.iif) |
2737 | NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); | 2500 | NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); |
2738 | else if (rt->rt_src != rt->fl.fl4_src) | 2501 | else if (rt->rt_src != rt->fl.fl4_src) |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 354721d67f69..3f5f7423b95c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -2045,10 +2045,7 @@ static void *established_get_first(struct seq_file *seq) | |||
2045 | struct hlist_node *node; | 2045 | struct hlist_node *node; |
2046 | struct inet_timewait_sock *tw; | 2046 | struct inet_timewait_sock *tw; |
2047 | 2047 | ||
2048 | /* We can reschedule _before_ having picked the target: */ | 2048 | read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock); |
2049 | cond_resched_softirq(); | ||
2050 | |||
2051 | read_lock(&tcp_hashinfo.ehash[st->bucket].lock); | ||
2052 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { | 2049 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { |
2053 | if (sk->sk_family != st->family) { | 2050 | if (sk->sk_family != st->family) { |
2054 | continue; | 2051 | continue; |
@@ -2065,7 +2062,7 @@ static void *established_get_first(struct seq_file *seq) | |||
2065 | rc = tw; | 2062 | rc = tw; |
2066 | goto out; | 2063 | goto out; |
2067 | } | 2064 | } |
2068 | read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); | 2065 | read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); |
2069 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2066 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2070 | } | 2067 | } |
2071 | out: | 2068 | out: |
@@ -2092,14 +2089,11 @@ get_tw: | |||
2092 | cur = tw; | 2089 | cur = tw; |
2093 | goto out; | 2090 | goto out; |
2094 | } | 2091 | } |
2095 | read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); | 2092 | read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); |
2096 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2093 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2097 | 2094 | ||
2098 | /* We can reschedule between buckets: */ | ||
2099 | cond_resched_softirq(); | ||
2100 | |||
2101 | if (++st->bucket < tcp_hashinfo.ehash_size) { | 2095 | if (++st->bucket < tcp_hashinfo.ehash_size) { |
2102 | read_lock(&tcp_hashinfo.ehash[st->bucket].lock); | 2096 | read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock); |
2103 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | 2097 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); |
2104 | } else { | 2098 | } else { |
2105 | cur = NULL; | 2099 | cur = NULL; |
@@ -2144,7 +2138,6 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) | |||
2144 | 2138 | ||
2145 | if (!rc) { | 2139 | if (!rc) { |
2146 | inet_listen_unlock(&tcp_hashinfo); | 2140 | inet_listen_unlock(&tcp_hashinfo); |
2147 | local_bh_disable(); | ||
2148 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2141 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2149 | rc = established_get_idx(seq, pos); | 2142 | rc = established_get_idx(seq, pos); |
2150 | } | 2143 | } |
@@ -2177,7 +2170,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2177 | rc = listening_get_next(seq, v); | 2170 | rc = listening_get_next(seq, v); |
2178 | if (!rc) { | 2171 | if (!rc) { |
2179 | inet_listen_unlock(&tcp_hashinfo); | 2172 | inet_listen_unlock(&tcp_hashinfo); |
2180 | local_bh_disable(); | ||
2181 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2173 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2182 | rc = established_get_first(seq); | 2174 | rc = established_get_first(seq); |
2183 | } | 2175 | } |
@@ -2209,8 +2201,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) | |||
2209 | case TCP_SEQ_STATE_TIME_WAIT: | 2201 | case TCP_SEQ_STATE_TIME_WAIT: |
2210 | case TCP_SEQ_STATE_ESTABLISHED: | 2202 | case TCP_SEQ_STATE_ESTABLISHED: |
2211 | if (v) | 2203 | if (v) |
2212 | read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); | 2204 | read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); |
2213 | local_bh_enable(); | ||
2214 | break; | 2205 | break; |
2215 | } | 2206 | } |
2216 | } | 2207 | } |
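The tcp_ipv4.c hunks drop the long local_bh_disable()/cond_resched_softirq() dance around the established-hash walk in the /proc/net/tcp seq_file code; bottom halves are now disabled only per bucket through read_lock_bh()/read_unlock_bh(). The resulting locking shape, condensed into a hedged sketch:

	/* one bucket at a time; BHs are off only inside this window */
	read_lock_bh(&tcp_hashinfo.ehash[bucket].lock);
	/* ... walk the socket and timewait chains of this bucket ... */
	read_unlock_bh(&tcp_hashinfo.ehash[bucket].lock);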
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 53232dd6fb48..20aea1595c4d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -699,6 +699,14 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss | |||
699 | tp->fackets_out -= diff; | 699 | tp->fackets_out -= diff; |
700 | if ((int)tp->fackets_out < 0) | 700 | if ((int)tp->fackets_out < 0) |
701 | tp->fackets_out = 0; | 701 | tp->fackets_out = 0; |
702 | /* SACK fastpath might overwrite it unless dealt with */ | ||
703 | if (tp->fastpath_skb_hint != NULL && | ||
704 | after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, | ||
705 | TCP_SKB_CB(skb)->seq)) { | ||
706 | tp->fastpath_cnt_hint -= diff; | ||
707 | if ((int)tp->fastpath_cnt_hint < 0) | ||
708 | tp->fastpath_cnt_hint = 0; | ||
709 | } | ||
702 | } | 710 | } |
703 | } | 711 | } |
704 | 712 | ||
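The tcp_output.c hunk keeps the SACK fastpath hints consistent when tcp_fragment() splits a packet that lies before the hinted skb: the cached count is reduced by the same diff as fackets_out and clamped at zero. The after() test used there is the usual wraparound-safe sequence comparison; a minimal standalone sketch of what it computes (not the kernel macro itself):

#include <stdint.h>

/* true if sequence number a comes later than b, modulo 2^32 */
static inline int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}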
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index d9323dfff826..86624fabc4bf 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -6,8 +6,7 @@ | |||
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation; either version 2 of the License, or | 9 | * the Free Software Foundation; either version 2 of the License. |
10 | * (at your option) any later version. | ||
11 | * | 10 | * |
12 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
@@ -25,23 +24,22 @@ | |||
25 | #include <linux/tcp.h> | 24 | #include <linux/tcp.h> |
26 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
28 | #include <linux/kfifo.h> | ||
29 | #include <linux/ktime.h> | 27 | #include <linux/ktime.h> |
30 | #include <linux/time.h> | 28 | #include <linux/time.h> |
31 | #include <linux/vmalloc.h> | ||
32 | 29 | ||
33 | #include <net/tcp.h> | 30 | #include <net/tcp.h> |
34 | 31 | ||
35 | MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); | 32 | MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); |
36 | MODULE_DESCRIPTION("TCP cwnd snooper"); | 33 | MODULE_DESCRIPTION("TCP cwnd snooper"); |
37 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
35 | MODULE_VERSION("1.1"); | ||
38 | 36 | ||
39 | static int port __read_mostly = 0; | 37 | static int port __read_mostly = 0; |
40 | MODULE_PARM_DESC(port, "Port to match (0=all)"); | 38 | MODULE_PARM_DESC(port, "Port to match (0=all)"); |
41 | module_param(port, int, 0); | 39 | module_param(port, int, 0); |
42 | 40 | ||
43 | static int bufsize __read_mostly = 64*1024; | 41 | static int bufsize __read_mostly = 4096; |
44 | MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); | 42 | MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); |
45 | module_param(bufsize, int, 0); | 43 | module_param(bufsize, int, 0); |
46 | 44 | ||
47 | static int full __read_mostly; | 45 | static int full __read_mostly; |
@@ -50,39 +48,38 @@ module_param(full, int, 0); | |||
50 | 48 | ||
51 | static const char procname[] = "tcpprobe"; | 49 | static const char procname[] = "tcpprobe"; |
52 | 50 | ||
53 | struct { | 51 | struct tcp_log { |
54 | struct kfifo *fifo; | 52 | ktime_t tstamp; |
53 | __be32 saddr, daddr; | ||
54 | __be16 sport, dport; | ||
55 | u16 length; | ||
56 | u32 snd_nxt; | ||
57 | u32 snd_una; | ||
58 | u32 snd_wnd; | ||
59 | u32 snd_cwnd; | ||
60 | u32 ssthresh; | ||
61 | u32 srtt; | ||
62 | }; | ||
63 | |||
64 | static struct { | ||
55 | spinlock_t lock; | 65 | spinlock_t lock; |
56 | wait_queue_head_t wait; | 66 | wait_queue_head_t wait; |
57 | ktime_t start; | 67 | ktime_t start; |
58 | u32 lastcwnd; | 68 | u32 lastcwnd; |
59 | } tcpw; | ||
60 | 69 | ||
61 | /* | 70 | unsigned long head, tail; |
62 | * Print to log with timestamps. | 71 | struct tcp_log *log; |
63 | * FIXME: causes an extra copy | 72 | } tcp_probe; |
64 | */ | 73 | |
65 | static void printl(const char *fmt, ...) | ||
66 | __attribute__ ((format (printf, 1, 2))); | ||
67 | 74 | ||
68 | static void printl(const char *fmt, ...) | 75 | static inline int tcp_probe_used(void) |
69 | { | 76 | { |
70 | va_list args; | 77 | return (tcp_probe.head - tcp_probe.tail) % bufsize; |
71 | int len; | 78 | } |
72 | struct timespec tv; | 79 | |
73 | char tbuf[256]; | 80 | static inline int tcp_probe_avail(void) |
74 | 81 | { | |
75 | va_start(args, fmt); | 82 | return bufsize - tcp_probe_used(); |
76 | /* want monotonic time since start of tcp_probe */ | ||
77 | tv = ktime_to_timespec(ktime_sub(ktime_get(), tcpw.start)); | ||
78 | |||
79 | len = sprintf(tbuf, "%lu.%09lu ", | ||
80 | (unsigned long) tv.tv_sec, (unsigned long) tv.tv_nsec); | ||
81 | len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); | ||
82 | va_end(args); | ||
83 | |||
84 | kfifo_put(tcpw.fifo, tbuf, len); | ||
85 | wake_up(&tcpw.wait); | ||
86 | } | 83 | } |
87 | 84 | ||
88 | /* | 85 | /* |
@@ -97,63 +94,117 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
97 | 94 | ||
98 | /* Only update if port matches */ | 95 | /* Only update if port matches */ |
99 | if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) | 96 | if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) |
100 | && (full || tp->snd_cwnd != tcpw.lastcwnd)) { | 97 | && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { |
101 | printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u %u\n", | 98 | |
102 | NIPQUAD(inet->saddr), ntohs(inet->sport), | 99 | spin_lock(&tcp_probe.lock); |
103 | NIPQUAD(inet->daddr), ntohs(inet->dport), | 100 | /* If log fills, just silently drop */ |
104 | skb->len, tp->snd_nxt, tp->snd_una, | 101 | if (tcp_probe_avail() > 1) { |
105 | tp->snd_cwnd, tcp_current_ssthresh(sk), | 102 | struct tcp_log *p = tcp_probe.log + tcp_probe.head; |
106 | tp->snd_wnd, tp->srtt >> 3); | 103 | |
107 | tcpw.lastcwnd = tp->snd_cwnd; | 104 | p->tstamp = ktime_get(); |
105 | p->saddr = inet->saddr; | ||
106 | p->sport = inet->sport; | ||
107 | p->daddr = inet->daddr; | ||
108 | p->dport = inet->dport; | ||
109 | p->length = skb->len; | ||
110 | p->snd_nxt = tp->snd_nxt; | ||
111 | p->snd_una = tp->snd_una; | ||
112 | p->snd_cwnd = tp->snd_cwnd; | ||
113 | p->snd_wnd = tp->snd_wnd; | ||
114 | p->srtt = tp->srtt >> 3; | ||
115 | |||
116 | tcp_probe.head = (tcp_probe.head + 1) % bufsize; | ||
117 | } | ||
118 | tcp_probe.lastcwnd = tp->snd_cwnd; | ||
119 | spin_unlock(&tcp_probe.lock); | ||
120 | |||
121 | wake_up(&tcp_probe.wait); | ||
108 | } | 122 | } |
109 | 123 | ||
110 | jprobe_return(); | 124 | jprobe_return(); |
111 | return 0; | 125 | return 0; |
112 | } | 126 | } |
113 | 127 | ||
114 | static struct jprobe tcp_probe = { | 128 | static struct jprobe tcp_jprobe = { |
115 | .kp = { | 129 | .kp = { |
116 | .symbol_name = "tcp_rcv_established", | 130 | .symbol_name = "tcp_rcv_established", |
117 | }, | 131 | }, |
118 | .entry = JPROBE_ENTRY(jtcp_rcv_established), | 132 | .entry = JPROBE_ENTRY(jtcp_rcv_established), |
119 | }; | 133 | }; |
120 | 134 | ||
121 | |||
122 | static int tcpprobe_open(struct inode * inode, struct file * file) | 135 | static int tcpprobe_open(struct inode * inode, struct file * file) |
123 | { | 136 | { |
124 | kfifo_reset(tcpw.fifo); | 137 | /* Reset (empty) log */ |
125 | tcpw.start = ktime_get(); | 138 | spin_lock_bh(&tcp_probe.lock); |
139 | tcp_probe.head = tcp_probe.tail = 0; | ||
140 | tcp_probe.start = ktime_get(); | ||
141 | spin_unlock_bh(&tcp_probe.lock); | ||
142 | |||
126 | return 0; | 143 | return 0; |
127 | } | 144 | } |
128 | 145 | ||
146 | static int tcpprobe_sprint(char *tbuf, int n) | ||
147 | { | ||
148 | const struct tcp_log *p | ||
149 | = tcp_probe.log + tcp_probe.tail % bufsize; | ||
150 | struct timespec tv | ||
151 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); | ||
152 | |||
153 | return snprintf(tbuf, n, | ||
154 | "%lu.%09lu %d.%d.%d.%d:%u %d.%d.%d.%d:%u" | ||
155 | " %d %#x %#x %u %u %u %u\n", | ||
156 | (unsigned long) tv.tv_sec, | ||
157 | (unsigned long) tv.tv_nsec, | ||
158 | NIPQUAD(p->saddr), ntohs(p->sport), | ||
159 | NIPQUAD(p->daddr), ntohs(p->dport), | ||
160 | p->length, p->snd_nxt, p->snd_una, | ||
161 | p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt); | ||
162 | } | ||
163 | |||
129 | static ssize_t tcpprobe_read(struct file *file, char __user *buf, | 164 | static ssize_t tcpprobe_read(struct file *file, char __user *buf, |
130 | size_t len, loff_t *ppos) | 165 | size_t len, loff_t *ppos) |
131 | { | 166 | { |
132 | int error = 0, cnt = 0; | 167 | int error = 0, cnt = 0; |
133 | unsigned char *tbuf; | ||
134 | 168 | ||
135 | if (!buf || len < 0) | 169 | if (!buf || len < 0) |
136 | return -EINVAL; | 170 | return -EINVAL; |
137 | 171 | ||
138 | if (len == 0) | 172 | while (cnt < len) { |
139 | return 0; | 173 | char tbuf[128]; |
174 | int width; | ||
175 | |||
176 | /* Wait for data in buffer */ | ||
177 | error = wait_event_interruptible(tcp_probe.wait, | ||
178 | tcp_probe_used() > 0); | ||
179 | if (error) | ||
180 | break; | ||
140 | 181 | ||
141 | tbuf = vmalloc(len); | 182 | spin_lock_bh(&tcp_probe.lock); |
142 | if (!tbuf) | 183 | if (tcp_probe.head == tcp_probe.tail) { |
143 | return -ENOMEM; | 184 | /* multiple readers race? */ |
185 | spin_unlock_bh(&tcp_probe.lock); | ||
186 | continue; | ||
187 | } | ||
144 | 188 | ||
145 | error = wait_event_interruptible(tcpw.wait, | 189 | width = tcpprobe_sprint(tbuf, sizeof(tbuf)); |
146 | __kfifo_len(tcpw.fifo) != 0); | ||
147 | if (error) | ||
148 | goto out_free; | ||
149 | 190 | ||
150 | cnt = kfifo_get(tcpw.fifo, tbuf, len); | 191 | if (width < len) |
151 | error = copy_to_user(buf, tbuf, cnt); | 192 | tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; |
152 | 193 | ||
153 | out_free: | 194 | spin_unlock_bh(&tcp_probe.lock); |
154 | vfree(tbuf); | 195 | |
196 | /* if record greater than space available | ||
197 | return partial buffer (so far) */ | ||
198 | if (width >= len) | ||
199 | break; | ||
200 | |||
201 | error = copy_to_user(buf + cnt, tbuf, width); | ||
202 | if (error) | ||
203 | break; | ||
204 | cnt += width; | ||
205 | } | ||
155 | 206 | ||
156 | return error ? error : cnt; | 207 | return cnt == 0 ? error : cnt; |
157 | } | 208 | } |
158 | 209 | ||
159 | static const struct file_operations tcpprobe_fops = { | 210 | static const struct file_operations tcpprobe_fops = { |
@@ -166,34 +217,37 @@ static __init int tcpprobe_init(void) | |||
166 | { | 217 | { |
167 | int ret = -ENOMEM; | 218 | int ret = -ENOMEM; |
168 | 219 | ||
169 | init_waitqueue_head(&tcpw.wait); | 220 | init_waitqueue_head(&tcp_probe.wait); |
170 | spin_lock_init(&tcpw.lock); | 221 | spin_lock_init(&tcp_probe.lock); |
171 | tcpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &tcpw.lock); | 222 | |
172 | if (IS_ERR(tcpw.fifo)) | 223 | if (bufsize < 0) |
173 | return PTR_ERR(tcpw.fifo); | 224 | return -EINVAL; |
225 | |||
226 | tcp_probe.log = kcalloc(sizeof(struct tcp_log), bufsize, GFP_KERNEL); | ||
227 | if (!tcp_probe.log) | ||
228 | goto err0; | ||
174 | 229 | ||
175 | if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops)) | 230 | if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops)) |
176 | goto err0; | 231 | goto err0; |
177 | 232 | ||
178 | ret = register_jprobe(&tcp_probe); | 233 | ret = register_jprobe(&tcp_jprobe); |
179 | if (ret) | 234 | if (ret) |
180 | goto err1; | 235 | goto err1; |
181 | 236 | ||
182 | pr_info("TCP watch registered (port=%d)\n", port); | 237 | pr_info("TCP probe registered (port=%d)\n", port); |
183 | return 0; | 238 | return 0; |
184 | err1: | 239 | err1: |
185 | proc_net_remove(procname); | 240 | proc_net_remove(procname); |
186 | err0: | 241 | err0: |
187 | kfifo_free(tcpw.fifo); | 242 | kfree(tcp_probe.log); |
188 | return ret; | 243 | return ret; |
189 | } | 244 | } |
190 | module_init(tcpprobe_init); | 245 | module_init(tcpprobe_init); |
191 | 246 | ||
192 | static __exit void tcpprobe_exit(void) | 247 | static __exit void tcpprobe_exit(void) |
193 | { | 248 | { |
194 | kfifo_free(tcpw.fifo); | ||
195 | proc_net_remove(procname); | 249 | proc_net_remove(procname); |
196 | unregister_jprobe(&tcp_probe); | 250 | unregister_jprobe(&tcp_jprobe); |
197 | 251 | kfree(tcp_probe.log); | |
198 | } | 252 | } |
199 | module_exit(tcpprobe_exit); | 253 | module_exit(tcpprobe_exit); |
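The tcp_probe rewrite replaces the kfifo of pre-formatted text with a fixed ring of binary struct tcp_log records: samples are stored under a spinlock in the jprobe handler and only formatted when /proc/net/tcpprobe is read, and bufsize now counts records rather than bytes. One detail worth noting in tcpprobe_read(): each record is formatted into an on-stack buffer while the lock is held, but copy_to_user(), which can fault and sleep, runs only after the lock is dropped. A hedged condensation of that consumer loop body (error bookkeeping elided):

	char tbuf[128];
	int width;

	spin_lock_bh(&tcp_probe.lock);
	width = tcpprobe_sprint(tbuf, sizeof(tbuf));
	if (width < len)		/* record fits: consume it */
		tcp_probe.tail = (tcp_probe.tail + 1) % bufsize;
	spin_unlock_bh(&tcp_probe.lock);

	if (width >= len)		/* no room left in the user buffer */
		break;
	if (copy_to_user(buf + cnt, tbuf, width))
		break;
	cnt += width;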
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index facb7e29304e..28355350fb62 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -70,6 +70,7 @@ | |||
70 | * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind | 70 | * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind |
71 | * a single port at the same time. | 71 | * a single port at the same time. |
72 | * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support | 72 | * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support |
73 | * James Chapman : Add L2TP encapsulation type. | ||
73 | * | 74 | * |
74 | * | 75 | * |
75 | * This program is free software; you can redistribute it and/or | 76 | * This program is free software; you can redistribute it and/or |
@@ -919,104 +920,6 @@ int udp_disconnect(struct sock *sk, int flags) | |||
919 | return 0; | 920 | return 0; |
920 | } | 921 | } |
921 | 922 | ||
922 | /* return: | ||
923 | * 1 if the UDP system should process it | ||
924 | * 0 if we should drop this packet | ||
925 | * -1 if it should get processed by xfrm4_rcv_encap | ||
926 | */ | ||
927 | static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) | ||
928 | { | ||
929 | #ifndef CONFIG_XFRM | ||
930 | return 1; | ||
931 | #else | ||
932 | struct udp_sock *up = udp_sk(sk); | ||
933 | struct udphdr *uh; | ||
934 | struct iphdr *iph; | ||
935 | int iphlen, len; | ||
936 | |||
937 | __u8 *udpdata; | ||
938 | __be32 *udpdata32; | ||
939 | __u16 encap_type = up->encap_type; | ||
940 | |||
941 | /* if we're overly short, let UDP handle it */ | ||
942 | len = skb->len - sizeof(struct udphdr); | ||
943 | if (len <= 0) | ||
944 | return 1; | ||
945 | |||
946 | /* if this is not encapsulated socket, then just return now */ | ||
947 | if (!encap_type) | ||
948 | return 1; | ||
949 | |||
950 | /* If this is a paged skb, make sure we pull up | ||
951 | * whatever data we need to look at. */ | ||
952 | if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8))) | ||
953 | return 1; | ||
954 | |||
955 | /* Now we can get the pointers */ | ||
956 | uh = udp_hdr(skb); | ||
957 | udpdata = (__u8 *)uh + sizeof(struct udphdr); | ||
958 | udpdata32 = (__be32 *)udpdata; | ||
959 | |||
960 | switch (encap_type) { | ||
961 | default: | ||
962 | case UDP_ENCAP_ESPINUDP: | ||
963 | /* Check if this is a keepalive packet. If so, eat it. */ | ||
964 | if (len == 1 && udpdata[0] == 0xff) { | ||
965 | return 0; | ||
966 | } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) { | ||
967 | /* ESP Packet without Non-ESP header */ | ||
968 | len = sizeof(struct udphdr); | ||
969 | } else | ||
970 | /* Must be an IKE packet.. pass it through */ | ||
971 | return 1; | ||
972 | break; | ||
973 | case UDP_ENCAP_ESPINUDP_NON_IKE: | ||
974 | /* Check if this is a keepalive packet. If so, eat it. */ | ||
975 | if (len == 1 && udpdata[0] == 0xff) { | ||
976 | return 0; | ||
977 | } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && | ||
978 | udpdata32[0] == 0 && udpdata32[1] == 0) { | ||
979 | |||
980 | /* ESP Packet with Non-IKE marker */ | ||
981 | len = sizeof(struct udphdr) + 2 * sizeof(u32); | ||
982 | } else | ||
983 | /* Must be an IKE packet.. pass it through */ | ||
984 | return 1; | ||
985 | break; | ||
986 | } | ||
987 | |||
988 | /* At this point we are sure that this is an ESPinUDP packet, | ||
989 | * so we need to remove 'len' bytes from the packet (the UDP | ||
990 | * header and optional ESP marker bytes) and then modify the | ||
991 | * protocol to ESP, and then call into the transform receiver. | ||
992 | */ | ||
993 | if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | ||
994 | return 0; | ||
995 | |||
996 | /* Now we can update and verify the packet length... */ | ||
997 | iph = ip_hdr(skb); | ||
998 | iphlen = iph->ihl << 2; | ||
999 | iph->tot_len = htons(ntohs(iph->tot_len) - len); | ||
1000 | if (skb->len < iphlen + len) { | ||
1001 | /* packet is too small!?! */ | ||
1002 | return 0; | ||
1003 | } | ||
1004 | |||
1005 | /* pull the data buffer up to the ESP header and set the | ||
1006 | * transport header to point to ESP. Keep UDP on the stack | ||
1007 | * for later. | ||
1008 | */ | ||
1009 | __skb_pull(skb, len); | ||
1010 | skb_reset_transport_header(skb); | ||
1011 | |||
1012 | /* modify the protocol (it's ESP!) */ | ||
1013 | iph->protocol = IPPROTO_ESP; | ||
1014 | |||
1015 | /* and let the caller know to send this into the ESP processor... */ | ||
1016 | return -1; | ||
1017 | #endif | ||
1018 | } | ||
1019 | |||
1020 | /* returns: | 923 | /* returns: |
1021 | * -1: error | 924 | * -1: error |
1022 | * 0: success | 925 | * 0: success |
@@ -1039,28 +942,28 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
1039 | 942 | ||
1040 | if (up->encap_type) { | 943 | if (up->encap_type) { |
1041 | /* | 944 | /* |
1042 | * This is an encapsulation socket, so let's see if this is | 945 | * This is an encapsulation socket so pass the skb to |
1043 | * an encapsulated packet. | 946 | * the socket's udp_encap_rcv() hook. Otherwise, just |
1044 | * If it's a keepalive packet, then just eat it. | 947 | * fall through and pass this up the UDP socket. |
1045 | * If it's an encapsulateed packet, then pass it to the | 948 | * up->encap_rcv() returns the following value: |
1046 | * IPsec xfrm input and return the response | 949 | * =0 if skb was successfully passed to the encap |
1047 | * appropriately. Otherwise, just fall through and | 950 | * handler or was discarded by it. |
1048 | * pass this up the UDP socket. | 951 | * >0 if skb should be passed on to UDP. |
952 | * <0 if skb should be resubmitted as proto -N | ||
1049 | */ | 953 | */ |
1050 | int ret; | ||
1051 | 954 | ||
1052 | ret = udp_encap_rcv(sk, skb); | 955 | /* if we're overly short, let UDP handle it */ |
1053 | if (ret == 0) { | 956 | if (skb->len > sizeof(struct udphdr) && |
1054 | /* Eat the packet .. */ | 957 | up->encap_rcv != NULL) { |
1055 | kfree_skb(skb); | 958 | int ret; |
1056 | return 0; | 959 | |
1057 | } | 960 | ret = (*up->encap_rcv)(sk, skb); |
1058 | if (ret < 0) { | 961 | if (ret <= 0) { |
1059 | /* process the ESP packet */ | 962 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag); |
1060 | ret = xfrm4_rcv_encap(skb, up->encap_type); | 963 | return -ret; |
1061 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag); | 964 | } |
1062 | return -ret; | ||
1063 | } | 965 | } |
966 | |||
1064 | /* FALLTHROUGH -- it's a UDP Packet */ | 967 | /* FALLTHROUGH -- it's a UDP Packet */ |
1065 | } | 968 | } |
1066 | 969 | ||
@@ -1349,6 +1252,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1349 | case 0: | 1252 | case 0: |
1350 | case UDP_ENCAP_ESPINUDP: | 1253 | case UDP_ENCAP_ESPINUDP: |
1351 | case UDP_ENCAP_ESPINUDP_NON_IKE: | 1254 | case UDP_ENCAP_ESPINUDP_NON_IKE: |
1255 | up->encap_rcv = xfrm4_udp_encap_rcv; | ||
1256 | /* FALLTHROUGH */ | ||
1257 | case UDP_ENCAP_L2TPINUDP: | ||
1352 | up->encap_type = val; | 1258 | up->encap_type = val; |
1353 | break; | 1259 | break; |
1354 | default: | 1260 | default: |
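With this change the ESP-in-UDP demultiplexing logic leaves udp.c entirely: udp_queue_rcv_skb() just calls the socket's encap_rcv hook, and udp_lib_setsockopt() wires the ESP encapsulation types to xfrm4_udp_encap_rcv while accepting the new UDP_ENCAP_L2TPINUDP type, whose handler is installed by the L2TP code itself. A hypothetical sketch of how an in-kernel encapsulation user would plug into the hook; the helper names are made up, only the return-value contract comes from the comment in the hunk above:

static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!my_tunnel_frame(skb))	/* hypothetical classifier */
		return 1;		/* >0: fall through to normal UDP */

	my_tunnel_queue(sk, skb);	/* hypothetical consumer */
	return 0;			/* 0: skb consumed (or dropped) */
}

	/* on the tunnel's kernel-held UDP socket: */
	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
	udp_sk(sk)->encap_rcv  = my_encap_rcv;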
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index fa1902dc81b8..2fa108245413 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -16,13 +16,6 @@ | |||
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | #include <net/xfrm.h> | 17 | #include <net/xfrm.h> |
18 | 18 | ||
19 | int xfrm4_rcv(struct sk_buff *skb) | ||
20 | { | ||
21 | return xfrm4_rcv_encap(skb, 0); | ||
22 | } | ||
23 | |||
24 | EXPORT_SYMBOL(xfrm4_rcv); | ||
25 | |||
26 | static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) | 19 | static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) |
27 | { | 20 | { |
28 | switch (nexthdr) { | 21 | switch (nexthdr) { |
@@ -53,7 +46,7 @@ drop: | |||
53 | } | 46 | } |
54 | #endif | 47 | #endif |
55 | 48 | ||
56 | int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) | 49 | static int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) |
57 | { | 50 | { |
58 | __be32 spi, seq; | 51 | __be32 spi, seq; |
59 | struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH]; | 52 | struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH]; |
@@ -167,3 +160,108 @@ drop: | |||
167 | kfree_skb(skb); | 160 | kfree_skb(skb); |
168 | return 0; | 161 | return 0; |
169 | } | 162 | } |
163 | |||
164 | /* If it's a keepalive packet, then just eat it. | ||
165 | * If it's an encapsulated packet, then pass it to the | ||
166 | * IPsec xfrm input. | ||
167 | * Returns 0 if skb passed to xfrm or was dropped. | ||
168 | * Returns >0 if skb should be passed to UDP. | ||
169 | * Returns <0 if skb should be resubmitted (-ret is protocol) | ||
170 | */ | ||
171 | int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) | ||
172 | { | ||
173 | struct udp_sock *up = udp_sk(sk); | ||
174 | struct udphdr *uh; | ||
175 | struct iphdr *iph; | ||
176 | int iphlen, len; | ||
177 | int ret; | ||
178 | |||
179 | __u8 *udpdata; | ||
180 | __be32 *udpdata32; | ||
181 | __u16 encap_type = up->encap_type; | ||
182 | |||
183 | /* if this is not encapsulated socket, then just return now */ | ||
184 | if (!encap_type) | ||
185 | return 1; | ||
186 | |||
187 | /* If this is a paged skb, make sure we pull up | ||
188 | * whatever data we need to look at. */ | ||
189 | len = skb->len - sizeof(struct udphdr); | ||
190 | if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8))) | ||
191 | return 1; | ||
192 | |||
193 | /* Now we can get the pointers */ | ||
194 | uh = udp_hdr(skb); | ||
195 | udpdata = (__u8 *)uh + sizeof(struct udphdr); | ||
196 | udpdata32 = (__be32 *)udpdata; | ||
197 | |||
198 | switch (encap_type) { | ||
199 | default: | ||
200 | case UDP_ENCAP_ESPINUDP: | ||
201 | /* Check if this is a keepalive packet. If so, eat it. */ | ||
202 | if (len == 1 && udpdata[0] == 0xff) { | ||
203 | goto drop; | ||
204 | } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) { | ||
205 | /* ESP Packet without Non-ESP header */ | ||
206 | len = sizeof(struct udphdr); | ||
207 | } else | ||
208 | /* Must be an IKE packet.. pass it through */ | ||
209 | return 1; | ||
210 | break; | ||
211 | case UDP_ENCAP_ESPINUDP_NON_IKE: | ||
212 | /* Check if this is a keepalive packet. If so, eat it. */ | ||
213 | if (len == 1 && udpdata[0] == 0xff) { | ||
214 | goto drop; | ||
215 | } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && | ||
216 | udpdata32[0] == 0 && udpdata32[1] == 0) { | ||
217 | |||
218 | /* ESP Packet with Non-IKE marker */ | ||
219 | len = sizeof(struct udphdr) + 2 * sizeof(u32); | ||
220 | } else | ||
221 | /* Must be an IKE packet.. pass it through */ | ||
222 | return 1; | ||
223 | break; | ||
224 | } | ||
225 | |||
226 | /* At this point we are sure that this is an ESPinUDP packet, | ||
227 | * so we need to remove 'len' bytes from the packet (the UDP | ||
228 | * header and optional ESP marker bytes) and then modify the | ||
229 | * protocol to ESP, and then call into the transform receiver. | ||
230 | */ | ||
231 | if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | ||
232 | goto drop; | ||
233 | |||
234 | /* Now we can update and verify the packet length... */ | ||
235 | iph = ip_hdr(skb); | ||
236 | iphlen = iph->ihl << 2; | ||
237 | iph->tot_len = htons(ntohs(iph->tot_len) - len); | ||
238 | if (skb->len < iphlen + len) { | ||
239 | /* packet is too small!?! */ | ||
240 | goto drop; | ||
241 | } | ||
242 | |||
243 | /* pull the data buffer up to the ESP header and set the | ||
244 | * transport header to point to ESP. Keep UDP on the stack | ||
245 | * for later. | ||
246 | */ | ||
247 | __skb_pull(skb, len); | ||
248 | skb_reset_transport_header(skb); | ||
249 | |||
250 | /* modify the protocol (it's ESP!) */ | ||
251 | iph->protocol = IPPROTO_ESP; | ||
252 | |||
253 | /* process ESP */ | ||
254 | ret = xfrm4_rcv_encap(skb, encap_type); | ||
255 | return ret; | ||
256 | |||
257 | drop: | ||
258 | kfree_skb(skb); | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | int xfrm4_rcv(struct sk_buff *skb) | ||
263 | { | ||
264 | return xfrm4_rcv_encap(skb, 0); | ||
265 | } | ||
266 | |||
267 | EXPORT_SYMBOL(xfrm4_rcv); | ||
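In xfrm4_input.c, xfrm4_rcv_encap() becomes static and the UDP-facing entry point is now xfrm4_udp_encap_rcv(), which carries the RFC 3948 style framing checks that used to live in udp.c: a one-byte 0xff payload is a NAT-keepalive and is eaten, a zero first word marks an IKE packet and is passed up to UDP, and anything else is treated as ESP and fed to the xfrm input path. The decision for the plain UDP_ENCAP_ESPINUDP case, restated as a self-contained sketch (not the kernel function; 8 is the size of the ESP SPI plus sequence number):

#include <stdint.h>
#include <string.h>

enum encap_verdict { NAT_KEEPALIVE, IKE_PASSTHROUGH, ESP_IN_UDP };

static enum encap_verdict classify_espinudp(const uint8_t *payload, int len)
{
	uint32_t first_word;

	if (len == 1 && payload[0] == 0xff)
		return NAT_KEEPALIVE;		/* drop silently */

	if (len > 8) {
		memcpy(&first_word, payload, sizeof(first_word));
		if (first_word != 0)
			return ESP_IN_UDP;	/* non-zero SPI: hand to xfrm */
	}
	return IKE_PASSTHROUGH;			/* Non-ESP marker: let UDP deliver it */
}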
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index 568510304553..9275c79119b6 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c | |||
@@ -109,3 +109,4 @@ static void __exit ipip_fini(void) | |||
109 | module_init(ipip_init); | 109 | module_init(ipip_init); |
110 | module_exit(ipip_fini); | 110 | module_exit(ipip_fini); |
111 | MODULE_LICENSE("GPL"); | 111 | MODULE_LICENSE("GPL"); |
112 | MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP); | ||