author	David S. Miller <davem@davemloft.net>	2014-11-01 14:53:27 -0400
committer	David S. Miller <davem@davemloft.net>	2014-11-01 14:53:27 -0400
commit	55b42b5ca2dcf143465968697fe6c6503b05fca1 (patch)
tree	91878cd53efc44ba67244d4d3897020828c87c01 /net/bridge
parent	10738eeaf4ab3de092586cefcc082e7d43ca0044 (diff)
parent	ec1f1276022e4e3ca40871810217d513e39ff250 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/phy/marvell.c

Simple overlapping changes in drivers/net/phy/marvell.c

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/bridge')
-rw-r--r--	net/bridge/br_forward.c	1
-rw-r--r--	net/bridge/br_netfilter.c	24
-rw-r--r--	net/bridge/netfilter/nf_tables_bridge.c	6
-rw-r--r--	net/bridge/netfilter/nft_reject_bridge.c	296
4 files changed, 295 insertions, 32 deletions
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 1510b54e6a2e..f96933a823e3 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 
 	kfree_skb(skb);
 }
+EXPORT_SYMBOL_GPL(br_deliver);
 
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 1bada53bb195..1a4f32c09ad5 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
 
 static int br_parse_ip_options(struct sk_buff *skb)
 {
-	struct ip_options *opt;
 	const struct iphdr *iph;
 	struct net_device *dev = skb->dev;
 	u32 len;
@@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb)
 		goto inhdr_error;
 
 	iph = ip_hdr(skb);
-	opt = &(IPCB(skb)->opt);
 
 	/* Basic sanity checks */
 	if (iph->ihl < 5 || iph->version != 4)
@@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb)
 	}
 
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-	if (iph->ihl == 5)
-		return 0;
-
-	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
-	if (ip_options_compile(dev_net(dev), opt, skb))
-		goto inhdr_error;
-
-	/* Check correct handling of SRR option */
-	if (unlikely(opt->srr)) {
-		struct in_device *in_dev = __in_dev_get_rcu(dev);
-		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
-			goto drop;
-
-		if (ip_options_rcv_srr(skb))
-			goto drop;
-	}
-
+	/* We should really parse IP options here but until
+	 * somebody who actually uses IP options complains to
+	 * us we'll just silently ignore the options because
+	 * we're lazy!
+	 */
 	return 0;
 
 inhdr_error:
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index da17a5eab8b4..074c557ab505 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = {
 	.type		= NFT_CHAIN_T_DEFAULT,
 	.family		= NFPROTO_BRIDGE,
 	.owner		= THIS_MODULE,
-	.hook_mask	= (1 << NF_BR_LOCAL_IN) |
+	.hook_mask	= (1 << NF_BR_PRE_ROUTING) |
+			  (1 << NF_BR_LOCAL_IN) |
 			  (1 << NF_BR_FORWARD) |
-			  (1 << NF_BR_LOCAL_OUT),
+			  (1 << NF_BR_LOCAL_OUT) |
+			  (1 << NF_BR_POST_ROUTING),
 };
 
 static int __init nf_tables_bridge_init(void)
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index a76479535df2..654c9018e3e7 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -16,6 +16,238 @@
 #include <net/netfilter/nft_reject.h>
 #include <net/netfilter/ipv4/nf_reject.h>
 #include <net/netfilter/ipv6/nf_reject.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/netfilter_bridge.h>
+#include "../br_private.h"
+
+static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
+					struct sk_buff *nskb)
+{
+	struct ethhdr *eth;
+
+	eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
+	skb_reset_mac_header(nskb);
+	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
+	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
+	eth->h_proto = eth_hdr(oldskb)->h_proto;
+	skb_pull(nskb, ETH_HLEN);
+}
+
+static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
+{
+	struct iphdr *iph;
+	u32 len;
+
+	if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
+		return 0;
+
+	iph = ip_hdr(oldskb);
+	if (iph->ihl < 5 || iph->version != 4)
+		return 0;
+
+	len = ntohs(iph->tot_len);
+	if (oldskb->len < len)
+		return 0;
+	else if (len < (iph->ihl*4))
+		return 0;
+
+	if (!pskb_may_pull(oldskb, iph->ihl*4))
+		return 0;
+
+	return 1;
+}
+
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+{
+	struct sk_buff *nskb;
+	struct iphdr *niph;
+	const struct tcphdr *oth;
+	struct tcphdr _oth;
+
+	if (!nft_reject_iphdr_validate(oldskb))
+		return;
+
+	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+	if (!oth)
+		return;
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+			 LL_MAX_HEADER, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+				   sysctl_ip_default_ttl);
+	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+	niph->ttl	= sysctl_ip_default_ttl;
+	niph->tot_len	= htons(nskb->len);
+	ip_send_check(niph);
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
+					  u8 code)
+{
+	struct sk_buff *nskb;
+	struct iphdr *niph;
+	struct icmphdr *icmph;
+	unsigned int len;
+	void *payload;
+	__wsum csum;
+
+	if (!nft_reject_iphdr_validate(oldskb))
+		return;
+
+	/* IP header checks: fragment. */
+	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+		return;
+
+	/* RFC says return as much as we can without exceeding 576 bytes. */
+	len = min_t(unsigned int, 536, oldskb->len);
+
+	if (!pskb_may_pull(oldskb, len))
+		return;
+
+	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+		return;
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
+			 LL_MAX_HEADER + len, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
+				   sysctl_ip_default_ttl);
+
+	skb_reset_transport_header(nskb);
+	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
+	memset(icmph, 0, sizeof(*icmph));
+	icmph->type	= ICMP_DEST_UNREACH;
+	icmph->code	= code;
+
+	payload = skb_put(nskb, len);
+	memcpy(payload, skb_network_header(oldskb), len);
+
+	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
+	icmph->checksum = csum_fold(csum);
+
+	niph->tot_len	= htons(nskb->len);
+	ip_send_check(niph);
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
+{
+	struct ipv6hdr *hdr;
+	u32 pkt_len;
+
+	if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
+		return 0;
+
+	hdr = ipv6_hdr(oldskb);
+	if (hdr->version != 6)
+		return 0;
+
+	pkt_len = ntohs(hdr->payload_len);
+	if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
+		return 0;
+
+	return 1;
+}
+
+static void nft_reject_br_send_v6_tcp_reset(struct net *net,
+					    struct sk_buff *oldskb, int hook)
+{
+	struct sk_buff *nskb;
+	const struct tcphdr *oth;
+	struct tcphdr _oth;
+	unsigned int otcplen;
+	struct ipv6hdr *nip6h;
+
+	if (!nft_reject_ip6hdr_validate(oldskb))
+		return;
+
+	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
+	if (!oth)
+		return;
+
+	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
+			 LL_MAX_HEADER, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+				     net->ipv6.devconf_all->hop_limit);
+	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
+	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v6_unreach(struct net *net,
+					  struct sk_buff *oldskb, int hook,
+					  u8 code)
+{
+	struct sk_buff *nskb;
+	struct ipv6hdr *nip6h;
+	struct icmp6hdr *icmp6h;
+	unsigned int len;
+	void *payload;
+
+	if (!nft_reject_ip6hdr_validate(oldskb))
+		return;
+
+	/* Include "As much of invoking packet as possible without the ICMPv6
+	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
+	 */
+	len = min_t(unsigned int, 1220, oldskb->len);
+
+	if (!pskb_may_pull(oldskb, len))
+		return;
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
+			 LL_MAX_HEADER + len, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
+				     net->ipv6.devconf_all->hop_limit);
+
+	skb_reset_transport_header(nskb);
+	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
+	memset(icmp6h, 0, sizeof(*icmp6h));
+	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
+	icmp6h->icmp6_code = code;
+
+	payload = skb_put(nskb, len);
+	memcpy(payload, skb_network_header(oldskb), len);
+	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+	icmp6h->icmp6_cksum =
+		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
+				nskb->len - sizeof(struct ipv6hdr),
+				IPPROTO_ICMPV6,
+				csum_partial(icmp6h,
+					     nskb->len - sizeof(struct ipv6hdr),
+					     0));
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
 
 static void nft_reject_bridge_eval(const struct nft_expr *expr,
 				   struct nft_data data[NFT_REG_MAX + 1],
@@ -23,35 +255,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
 	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
+
+	if (is_broadcast_ether_addr(dest) ||
+	    is_multicast_ether_addr(dest))
+		goto out;
 
 	switch (eth_hdr(pkt->skb)->h_proto) {
 	case htons(ETH_P_IP):
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nf_send_unreach(pkt->skb, priv->icmp_code);
+			nft_reject_br_send_v4_unreach(pkt->skb,
+						      pkt->ops->hooknum,
+						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset(pkt->skb, pkt->ops->hooknum);
+			nft_reject_br_send_v4_tcp_reset(pkt->skb,
+							pkt->ops->hooknum);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nf_send_unreach(pkt->skb,
-					nft_reject_icmp_code(priv->icmp_code));
+			nft_reject_br_send_v4_unreach(pkt->skb,
+						      pkt->ops->hooknum,
+						      nft_reject_icmp_code(priv->icmp_code));
 			break;
 		}
 		break;
 	case htons(ETH_P_IPV6):
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-					 pkt->ops->hooknum);
+			nft_reject_br_send_v6_unreach(net, pkt->skb,
+						      pkt->ops->hooknum,
+						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+			nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+							pkt->ops->hooknum);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nf_send_unreach6(net, pkt->skb,
-					 nft_reject_icmpv6_code(priv->icmp_code),
-					 pkt->ops->hooknum);
+			nft_reject_br_send_v6_unreach(net, pkt->skb,
+						      pkt->ops->hooknum,
+						      nft_reject_icmpv6_code(priv->icmp_code));
 			break;
 		}
 		break;
@@ -59,15 +302,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
 		/* No explicit way to reject this protocol, drop it. */
 		break;
 	}
+out:
 	data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
+static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+{
+	struct nft_base_chain *basechain;
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		basechain = nft_base_chain(chain);
+
+		switch (basechain->ops[0].hooknum) {
+		case NF_BR_PRE_ROUTING:
+		case NF_BR_LOCAL_IN:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+	return 0;
+}
+
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
 				  const struct nft_expr *expr,
 				  const struct nlattr * const tb[])
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	int icmp_code;
+	int icmp_code, err;
+
+	err = nft_reject_bridge_validate_hooks(ctx->chain);
+	if (err < 0)
+		return err;
 
 	if (tb[NFTA_REJECT_TYPE] == NULL)
 		return -EINVAL;
@@ -116,6 +382,13 @@ nla_put_failure:
 	return -1;
 }
 
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+				      const struct nft_expr *expr,
+				      const struct nft_data **data)
+{
+	return nft_reject_bridge_validate_hooks(ctx->chain);
+}
+
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
 	.type		= &nft_reject_bridge_type,
@@ -123,6 +396,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
 	.eval		= nft_reject_bridge_eval,
 	.init		= nft_reject_bridge_init,
 	.dump		= nft_reject_bridge_dump,
+	.validate	= nft_reject_bridge_validate,
 };
 
 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {