author     David S. Miller <davem@davemloft.net>   2014-10-31 12:29:42 -0400
committer  David S. Miller <davem@davemloft.net>   2014-10-31 12:29:42 -0400
commit     e3a88f9c4f79a4d138a0ea464cfbac40ba46644c (patch)
tree       f3deeee3286b19fa6ac15d001cd1ba13fb78abf1
parent     de11b0e8c569b96c2cf6a811e3805b7aeef498a3 (diff)
parent     127917c29a432c3b798e014a1714e9c1af0f87fe (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
netfilter/ipvs fixes for net

The following patchset contains fixes for netfilter/ipvs. This round of
fixes is larger than usual at this stage, specifically because of the
nf_tables bridge reject fixes that I would like to see in 3.18. The
patches are:

1) Fix a null-pointer dereference that may occur when logging errors.
   This problem was introduced by 4a4739d56b0 ("ipvs: Pull out
   crosses_local_route_boundary logic") in v3.17-rc5.

2) Update the hook mask in nft_reject_bridge so we can also filter out
   packets from there. This fixes 36d2af5 ("netfilter: nf_tables: allow
   to filter from prerouting and postrouting"), which needs this chunk
   to work.

3) Two patches to refactor common code to forge the IPv4 and IPv6
   reject packets from the bridge. These are required by the nf_tables
   reject bridge fix.

4) Fix nft_reject_bridge by avoiding the use of the IP stack to reject
   packets from the bridge. The idea is to forge the reject packets and
   inject them to the original port via br_deliver(), which is now
   exported for that purpose.

5) Restrict nft_reject_bridge to the bridge prerouting and input hooks.
   Since the original skbuff may be cloned after prerouting, when the
   bridge stack needs to flood it to several bridge ports, it is too
   late to reject the traffic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
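As an illustration of what patches 4) and 5) enable: with the reject expression
confined to the bridge prerouting and input hooks, rules along the following
lines reject traffic as it enters a bridge port, and the forged reply is
injected back through br_deliver(). This is an illustrative nft command sketch,
not part of this series; the table and chain names, ports, and priority are
arbitrary examples.

    # bridge-family table with a base chain attached to the prerouting hook
    nft add table bridge filter
    nft add chain bridge filter prerouting '{ type filter hook prerouting priority 0; }'
    # answer matching TCP flows with a forged RST sent back out the ingress port
    nft add rule bridge filter prerouting tcp dport 25 reject with tcp reset
    # answer matching IPv4 UDP traffic with an ICMP port-unreachable
    nft add rule bridge filter prerouting ether type ip udp dport 137 reject with icmp type port-unreachable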
-rw-r--r--  include/net/netfilter/ipv4/nf_reject.h    |  10
-rw-r--r--  include/net/netfilter/ipv6/nf_reject.h    |  10
-rw-r--r--  net/bridge/br_forward.c                   |   1
-rw-r--r--  net/bridge/netfilter/nf_tables_bridge.c   |   6
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c  | 296
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c       |  88
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c       | 175
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c           |   4
8 files changed, 483 insertions, 107 deletions
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index e8427193c777..03e928a55229 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -1,6 +1,8 @@
 #ifndef _IPV4_NF_REJECT_H
 #define _IPV4_NF_REJECT_H
 
+#include <linux/skbuff.h>
+#include <net/ip.h>
 #include <net/icmp.h>
 
 static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
@@ -10,4 +12,12 @@ static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
 
 void nf_send_reset(struct sk_buff *oldskb, int hook);
 
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+                                             struct tcphdr *_oth, int hook);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+                                  const struct sk_buff *oldskb,
+                                  __be16 protocol, int ttl);
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+                             const struct tcphdr *oth);
+
 #endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 48e18810a9be..23216d48abf9 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -15,4 +15,14 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
 
 void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
 
+const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+                                              struct tcphdr *otcph,
+                                              unsigned int *otcplen, int hook);
+struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
+                                     const struct sk_buff *oldskb,
+                                     __be16 protocol, int hoplimit);
+void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+                              const struct sk_buff *oldskb,
+                              const struct tcphdr *oth, unsigned int otcplen);
+
 #endif /* _IPV6_NF_REJECT_H */
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 992ec49a96aa..44cb786b925a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 
         kfree_skb(skb);
 }
+EXPORT_SYMBOL_GPL(br_deliver);
 
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index da17a5eab8b4..074c557ab505 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = {
         .type           = NFT_CHAIN_T_DEFAULT,
         .family         = NFPROTO_BRIDGE,
         .owner          = THIS_MODULE,
-        .hook_mask      = (1 << NF_BR_LOCAL_IN) |
+        .hook_mask      = (1 << NF_BR_PRE_ROUTING) |
+                          (1 << NF_BR_LOCAL_IN) |
                           (1 << NF_BR_FORWARD) |
-                          (1 << NF_BR_LOCAL_OUT),
+                          (1 << NF_BR_LOCAL_OUT) |
+                          (1 << NF_BR_POST_ROUTING),
 };
 
 static int __init nf_tables_bridge_init(void)
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index a76479535df2..654c9018e3e7 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -16,6 +16,238 @@
 #include <net/netfilter/nft_reject.h>
 #include <net/netfilter/ipv4/nf_reject.h>
 #include <net/netfilter/ipv6/nf_reject.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/netfilter_bridge.h>
+#include "../br_private.h"
+
+static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
+                                        struct sk_buff *nskb)
+{
+        struct ethhdr *eth;
+
+        eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
+        skb_reset_mac_header(nskb);
+        ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
+        ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
+        eth->h_proto = eth_hdr(oldskb)->h_proto;
+        skb_pull(nskb, ETH_HLEN);
+}
+
+static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
+{
+        struct iphdr *iph;
+        u32 len;
+
+        if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
+                return 0;
+
+        iph = ip_hdr(oldskb);
+        if (iph->ihl < 5 || iph->version != 4)
+                return 0;
+
+        len = ntohs(iph->tot_len);
+        if (oldskb->len < len)
+                return 0;
+        else if (len < (iph->ihl*4))
+                return 0;
+
+        if (!pskb_may_pull(oldskb, iph->ihl*4))
+                return 0;
+
+        return 1;
+}
+
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+{
+        struct sk_buff *nskb;
+        struct iphdr *niph;
+        const struct tcphdr *oth;
+        struct tcphdr _oth;
+
+        if (!nft_reject_iphdr_validate(oldskb))
+                return;
+
+        oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+        if (!oth)
+                return;
+
+        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+                         LL_MAX_HEADER, GFP_ATOMIC);
+        if (!nskb)
+                return;
+
+        skb_reserve(nskb, LL_MAX_HEADER);
+        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+                                   sysctl_ip_default_ttl);
+        nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+        niph->ttl     = sysctl_ip_default_ttl;
+        niph->tot_len = htons(nskb->len);
+        ip_send_check(niph);
+
+        nft_reject_br_push_etherhdr(oldskb, nskb);
+
+        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
+                                          u8 code)
+{
+        struct sk_buff *nskb;
+        struct iphdr *niph;
+        struct icmphdr *icmph;
+        unsigned int len;
+        void *payload;
+        __wsum csum;
+
+        if (!nft_reject_iphdr_validate(oldskb))
+                return;
+
+        /* IP header checks: fragment. */
+        if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+                return;
+
+        /* RFC says return as much as we can without exceeding 576 bytes. */
+        len = min_t(unsigned int, 536, oldskb->len);
+
+        if (!pskb_may_pull(oldskb, len))
+                return;
+
+        if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+                return;
+
+        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
+                         LL_MAX_HEADER + len, GFP_ATOMIC);
+        if (!nskb)
+                return;
+
+        skb_reserve(nskb, LL_MAX_HEADER);
+        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
+                                   sysctl_ip_default_ttl);
+
+        skb_reset_transport_header(nskb);
+        icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
+        memset(icmph, 0, sizeof(*icmph));
+        icmph->type = ICMP_DEST_UNREACH;
+        icmph->code = code;
+
+        payload = skb_put(nskb, len);
+        memcpy(payload, skb_network_header(oldskb), len);
+
+        csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
+        icmph->checksum = csum_fold(csum);
+
+        niph->tot_len = htons(nskb->len);
+        ip_send_check(niph);
+
+        nft_reject_br_push_etherhdr(oldskb, nskb);
+
+        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
+{
+        struct ipv6hdr *hdr;
+        u32 pkt_len;
+
+        if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
+                return 0;
+
+        hdr = ipv6_hdr(oldskb);
+        if (hdr->version != 6)
+                return 0;
+
+        pkt_len = ntohs(hdr->payload_len);
+        if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
+                return 0;
+
+        return 1;
+}
+
+static void nft_reject_br_send_v6_tcp_reset(struct net *net,
+                                            struct sk_buff *oldskb, int hook)
+{
+        struct sk_buff *nskb;
+        const struct tcphdr *oth;
+        struct tcphdr _oth;
+        unsigned int otcplen;
+        struct ipv6hdr *nip6h;
+
+        if (!nft_reject_ip6hdr_validate(oldskb))
+                return;
+
+        oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
+        if (!oth)
+                return;
+
+        nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
+                         LL_MAX_HEADER, GFP_ATOMIC);
+        if (!nskb)
+                return;
+
+        skb_reserve(nskb, LL_MAX_HEADER);
+        nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+                                     net->ipv6.devconf_all->hop_limit);
+        nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
+        nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+        nft_reject_br_push_etherhdr(oldskb, nskb);
+
+        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v6_unreach(struct net *net,
+                                          struct sk_buff *oldskb, int hook,
+                                          u8 code)
+{
+        struct sk_buff *nskb;
+        struct ipv6hdr *nip6h;
+        struct icmp6hdr *icmp6h;
+        unsigned int len;
+        void *payload;
+
+        if (!nft_reject_ip6hdr_validate(oldskb))
+                return;
+
+        /* Include "As much of invoking packet as possible without the ICMPv6
+         * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
+         */
+        len = min_t(unsigned int, 1220, oldskb->len);
+
+        if (!pskb_may_pull(oldskb, len))
+                return;
+
+        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
+                         LL_MAX_HEADER + len, GFP_ATOMIC);
+        if (!nskb)
+                return;
+
+        skb_reserve(nskb, LL_MAX_HEADER);
+        nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
+                                     net->ipv6.devconf_all->hop_limit);
+
+        skb_reset_transport_header(nskb);
+        icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
+        memset(icmp6h, 0, sizeof(*icmp6h));
+        icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
+        icmp6h->icmp6_code = code;
+
+        payload = skb_put(nskb, len);
+        memcpy(payload, skb_network_header(oldskb), len);
+        nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+        icmp6h->icmp6_cksum =
+                csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
+                                nskb->len - sizeof(struct ipv6hdr),
+                                IPPROTO_ICMPV6,
+                                csum_partial(icmp6h,
+                                             nskb->len - sizeof(struct ipv6hdr),
+                                             0));
+
+        nft_reject_br_push_etherhdr(oldskb, nskb);
+
+        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
 
 static void nft_reject_bridge_eval(const struct nft_expr *expr,
                                    struct nft_data data[NFT_REG_MAX + 1],
@@ -23,35 +255,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
 {
         struct nft_reject *priv = nft_expr_priv(expr);
         struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+        const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
+
+        if (is_broadcast_ether_addr(dest) ||
+            is_multicast_ether_addr(dest))
+                goto out;
 
         switch (eth_hdr(pkt->skb)->h_proto) {
         case htons(ETH_P_IP):
                 switch (priv->type) {
                 case NFT_REJECT_ICMP_UNREACH:
-                        nf_send_unreach(pkt->skb, priv->icmp_code);
+                        nft_reject_br_send_v4_unreach(pkt->skb,
+                                                      pkt->ops->hooknum,
+                                                      priv->icmp_code);
                         break;
                 case NFT_REJECT_TCP_RST:
-                        nf_send_reset(pkt->skb, pkt->ops->hooknum);
+                        nft_reject_br_send_v4_tcp_reset(pkt->skb,
+                                                        pkt->ops->hooknum);
                         break;
                 case NFT_REJECT_ICMPX_UNREACH:
-                        nf_send_unreach(pkt->skb,
-                                        nft_reject_icmp_code(priv->icmp_code));
+                        nft_reject_br_send_v4_unreach(pkt->skb,
+                                                      pkt->ops->hooknum,
+                                                      nft_reject_icmp_code(priv->icmp_code));
                         break;
                 }
                 break;
         case htons(ETH_P_IPV6):
                 switch (priv->type) {
                 case NFT_REJECT_ICMP_UNREACH:
-                        nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-                                         pkt->ops->hooknum);
+                        nft_reject_br_send_v6_unreach(net, pkt->skb,
+                                                      pkt->ops->hooknum,
+                                                      priv->icmp_code);
                         break;
                 case NFT_REJECT_TCP_RST:
-                        nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+                        nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+                                                        pkt->ops->hooknum);
                         break;
                 case NFT_REJECT_ICMPX_UNREACH:
-                        nf_send_unreach6(net, pkt->skb,
-                                         nft_reject_icmpv6_code(priv->icmp_code),
-                                         pkt->ops->hooknum);
+                        nft_reject_br_send_v6_unreach(net, pkt->skb,
+                                                      pkt->ops->hooknum,
+                                                      nft_reject_icmpv6_code(priv->icmp_code));
                         break;
                 }
                 break;
@@ -59,15 +302,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
                 /* No explicit way to reject this protocol, drop it. */
                 break;
         }
+out:
         data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
+static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+{
+        struct nft_base_chain *basechain;
+
+        if (chain->flags & NFT_BASE_CHAIN) {
+                basechain = nft_base_chain(chain);
+
+                switch (basechain->ops[0].hooknum) {
+                case NF_BR_PRE_ROUTING:
+                case NF_BR_LOCAL_IN:
+                        break;
+                default:
+                        return -EOPNOTSUPP;
+                }
+        }
+        return 0;
+}
+
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
                                   const struct nft_expr *expr,
                                   const struct nlattr * const tb[])
 {
         struct nft_reject *priv = nft_expr_priv(expr);
-        int icmp_code;
+        int icmp_code, err;
+
+        err = nft_reject_bridge_validate_hooks(ctx->chain);
+        if (err < 0)
+                return err;
 
         if (tb[NFTA_REJECT_TYPE] == NULL)
                 return -EINVAL;
@@ -116,6 +382,13 @@ nla_put_failure:
         return -1;
 }
 
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+                                      const struct nft_expr *expr,
+                                      const struct nft_data **data)
+{
+        return nft_reject_bridge_validate_hooks(ctx->chain);
+}
+
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
         .type           = &nft_reject_bridge_type,
@@ -123,6 +396,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
         .eval           = nft_reject_bridge_eval,
         .init           = nft_reject_bridge_init,
         .dump           = nft_reject_bridge_dump,
+        .validate       = nft_reject_bridge_validate,
 };
 
 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 92b303dbd5fc..1baaa83dfe5c 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -12,43 +12,39 @@
 #include <net/route.h>
 #include <net/dst.h>
 #include <linux/netfilter_ipv4.h>
+#include <net/netfilter/ipv4/nf_reject.h>
 
-/* Send RST reply */
-void nf_send_reset(struct sk_buff *oldskb, int hook)
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+                                             struct tcphdr *_oth, int hook)
 {
-        struct sk_buff *nskb;
-        const struct iphdr *oiph;
-        struct iphdr *niph;
         const struct tcphdr *oth;
-        struct tcphdr _otcph, *tcph;
 
         /* IP header checks: fragment. */
         if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
-                return;
+                return NULL;
 
         oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
-                                 sizeof(_otcph), &_otcph);
+                                 sizeof(struct tcphdr), _oth);
         if (oth == NULL)
-                return;
+                return NULL;
 
         /* No RST for RST. */
         if (oth->rst)
-                return;
-
-        if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
-                return;
+                return NULL;
 
         /* Check checksum */
         if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
-                return;
-        oiph = ip_hdr(oldskb);
+                return NULL;
 
-        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
-                         LL_MAX_HEADER, GFP_ATOMIC);
-        if (!nskb)
-                return;
+        return oth;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
 
-        skb_reserve(nskb, LL_MAX_HEADER);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+                                  const struct sk_buff *oldskb,
+                                  __be16 protocol, int ttl)
+{
+        struct iphdr *niph, *oiph = ip_hdr(oldskb);
 
         skb_reset_network_header(nskb);
         niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
@@ -57,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
         niph->tos       = 0;
         niph->id        = 0;
         niph->frag_off  = htons(IP_DF);
-        niph->protocol  = IPPROTO_TCP;
+        niph->protocol  = protocol;
         niph->check     = 0;
         niph->saddr     = oiph->daddr;
         niph->daddr     = oiph->saddr;
+        niph->ttl       = ttl;
+
+        nskb->protocol = htons(ETH_P_IP);
+
+        return niph;
+}
+EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
+
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+                             const struct tcphdr *oth)
+{
+        struct iphdr *niph = ip_hdr(nskb);
+        struct tcphdr *tcph;
 
         skb_reset_transport_header(nskb);
         tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
@@ -69,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
         tcph->dest      = oth->source;
         tcph->doff      = sizeof(struct tcphdr) / 4;
 
-        if (oth->ack)
+        if (oth->ack) {
                 tcph->seq = oth->ack_seq;
-        else {
+        } else {
                 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
                                       oldskb->len - ip_hdrlen(oldskb) -
                                       (oth->doff << 2));
@@ -84,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
         nskb->ip_summed = CHECKSUM_PARTIAL;
         nskb->csum_start = (unsigned char *)tcph - nskb->head;
         nskb->csum_offset = offsetof(struct tcphdr, check);
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
+
+/* Send RST reply */
+void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+        struct sk_buff *nskb;
+        const struct iphdr *oiph;
+        struct iphdr *niph;
+        const struct tcphdr *oth;
+        struct tcphdr _oth;
+
+        oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+        if (!oth)
+                return;
+
+        if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+                return;
+
+        oiph = ip_hdr(oldskb);
+
+        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+                         LL_MAX_HEADER, GFP_ATOMIC);
+        if (!nskb)
+                return;
 
         /* ip_route_me_harder expects skb->dst to be set */
         skb_dst_set_noref(nskb, skb_dst(oldskb));
 
-        nskb->protocol = htons(ETH_P_IP);
+        skb_reserve(nskb, LL_MAX_HEADER);
+        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+                                   ip4_dst_hoplimit(skb_dst(nskb)));
+        nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+
         if (ip_route_me_harder(nskb, RTN_UNSPEC))
                 goto free_nskb;
 
-        niph->ttl       = ip4_dst_hoplimit(skb_dst(nskb));
-
         /* "Never happens" */
         if (nskb->len > dst_mtu(skb_dst(nskb)))
                 goto free_nskb;
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 20d9defc6c59..015eb8a80766 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -12,116 +12,102 @@
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <linux/netfilter_ipv6.h>
+#include <net/netfilter/ipv6/nf_reject.h>
 
-void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+                                              struct tcphdr *otcph,
+                                              unsigned int *otcplen, int hook)
 {
-        struct sk_buff *nskb;
-        struct tcphdr otcph, *tcph;
-        unsigned int otcplen, hh_len;
-        int tcphoff, needs_ack;
         const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
-        struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE        0x0U
-        const __u8 tclass = DEFAULT_TOS_VALUE;
-        struct dst_entry *dst = NULL;
         u8 proto;
         __be16 frag_off;
-        struct flowi6 fl6;
-
-        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
-            (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
-                pr_debug("addr is not unicast.\n");
-                return;
-        }
+        int tcphoff;
 
         proto = oip6h->nexthdr;
-        tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
+        tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
+                                   &proto, &frag_off);
 
         if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
                 pr_debug("Cannot get TCP header.\n");
-                return;
+                return NULL;
         }
 
-        otcplen = oldskb->len - tcphoff;
+        *otcplen = oldskb->len - tcphoff;
 
         /* IP header checks: fragment, too short. */
-        if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
-                pr_debug("proto(%d) != IPPROTO_TCP, "
-                         "or too short. otcplen = %d\n",
-                         proto, otcplen);
-                return;
+        if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) {
+                pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n",
+                         proto, *otcplen);
+                return NULL;
         }
 
-        if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
-                BUG();
+        otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr),
+                                   otcph);
+        if (otcph == NULL)
+                return NULL;
 
         /* No RST for RST. */
-        if (otcph.rst) {
+        if (otcph->rst) {
                 pr_debug("RST is set\n");
-                return;
+                return NULL;
         }
 
         /* Check checksum. */
         if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
                 pr_debug("TCP checksum is invalid\n");
-                return;
-        }
-
-        memset(&fl6, 0, sizeof(fl6));
-        fl6.flowi6_proto = IPPROTO_TCP;
-        fl6.saddr = oip6h->daddr;
-        fl6.daddr = oip6h->saddr;
-        fl6.fl6_sport = otcph.dest;
-        fl6.fl6_dport = otcph.source;
-        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
-        dst = ip6_route_output(net, NULL, &fl6);
-        if (dst == NULL || dst->error) {
-                dst_release(dst);
-                return;
-        }
-        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
-        if (IS_ERR(dst))
-                return;
-
-        hh_len = (dst->dev->hard_header_len + 15)&~15;
-        nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
-                         + sizeof(struct tcphdr) + dst->trailer_len,
-                         GFP_ATOMIC);
-
-        if (!nskb) {
-                net_dbg_ratelimited("cannot alloc skb\n");
-                dst_release(dst);
-                return;
+                return NULL;
         }
 
-        skb_dst_set(nskb, dst);
+        return otcph;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
 
-        skb_reserve(nskb, hh_len + dst->header_len);
+struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
+                                     const struct sk_buff *oldskb,
+                                     __be16 protocol, int hoplimit)
+{
+        struct ipv6hdr *ip6h;
+        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+#define DEFAULT_TOS_VALUE        0x0U
+        const __u8 tclass = DEFAULT_TOS_VALUE;
 
         skb_put(nskb, sizeof(struct ipv6hdr));
         skb_reset_network_header(nskb);
         ip6h = ipv6_hdr(nskb);
         ip6_flow_hdr(ip6h, tclass, 0);
-        ip6h->hop_limit = ip6_dst_hoplimit(dst);
-        ip6h->nexthdr = IPPROTO_TCP;
+        ip6h->hop_limit = hoplimit;
+        ip6h->nexthdr = protocol;
         ip6h->saddr = oip6h->daddr;
         ip6h->daddr = oip6h->saddr;
 
+        nskb->protocol = htons(ETH_P_IPV6);
+
+        return ip6h;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);
+
+void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+                              const struct sk_buff *oldskb,
+                              const struct tcphdr *oth, unsigned int otcplen)
+{
+        struct tcphdr *tcph;
+        int needs_ack;
+
         skb_reset_transport_header(nskb);
         tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
         /* Truncate to length (no data) */
         tcph->doff = sizeof(struct tcphdr)/4;
-        tcph->source = otcph.dest;
-        tcph->dest = otcph.source;
+        tcph->source = oth->dest;
+        tcph->dest = oth->source;
 
-        if (otcph.ack) {
+        if (oth->ack) {
                 needs_ack = 0;
-                tcph->seq = otcph.ack_seq;
+                tcph->seq = oth->ack_seq;
                 tcph->ack_seq = 0;
         } else {
                 needs_ack = 1;
-                tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
-                                      + otcplen - (otcph.doff<<2));
+                tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+                                      otcplen - (oth->doff<<2));
                 tcph->seq = 0;
         }
 
@@ -139,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
                                  sizeof(struct tcphdr), IPPROTO_TCP,
                                  csum_partial(tcph,
                                               sizeof(struct tcphdr), 0));
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
+
+void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+{
+        struct sk_buff *nskb;
+        struct tcphdr _otcph;
+        const struct tcphdr *otcph;
+        unsigned int otcplen, hh_len;
+        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+        struct ipv6hdr *ip6h;
+        struct dst_entry *dst = NULL;
+        struct flowi6 fl6;
+
+        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
+            (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
+                pr_debug("addr is not unicast.\n");
+                return;
+        }
+
+        otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
+        if (!otcph)
+                return;
+
+        memset(&fl6, 0, sizeof(fl6));
+        fl6.flowi6_proto = IPPROTO_TCP;
+        fl6.saddr = oip6h->daddr;
+        fl6.daddr = oip6h->saddr;
+        fl6.fl6_sport = otcph->dest;
+        fl6.fl6_dport = otcph->source;
+        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+        dst = ip6_route_output(net, NULL, &fl6);
+        if (dst == NULL || dst->error) {
+                dst_release(dst);
+                return;
+        }
+        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+        if (IS_ERR(dst))
+                return;
+
+        hh_len = (dst->dev->hard_header_len + 15)&~15;
+        nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+                         + sizeof(struct tcphdr) + dst->trailer_len,
+                         GFP_ATOMIC);
+
+        if (!nskb) {
+                net_dbg_ratelimited("cannot alloc skb\n");
+                dst_release(dst);
+                return;
+        }
+
+        skb_dst_set(nskb, dst);
+
+        skb_reserve(nskb, hh_len + dst->header_len);
+        ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+                                    ip6_dst_hoplimit(dst));
+        nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
 
         nf_ct_attach(nskb, oldskb);
 
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 91f17c1eb8a2..437a3663ad03 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
         if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
                                                   local))) {
                 IP_VS_DBG_RL("We are crossing local and non-local addresses"
-                             " daddr=%pI4\n", &dest->addr.ip);
+                             " daddr=%pI4\n", &daddr);
                 goto err_put;
         }
 
@@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
         if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
                                                   local))) {
                 IP_VS_DBG_RL("We are crossing local and non-local addresses"
                              " daddr=%pI6\n", &dest->addr.in6);
+                             " daddr=%pI6\n", daddr);
                 goto err_put;
         }
 