aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPablo Neira Ayuso <pablo@netfilter.org>2014-10-25 12:40:26 -0400
committerPablo Neira Ayuso <pablo@netfilter.org>2014-10-31 07:50:08 -0400
commit523b929d5446c023e1219aa81455a8c766cac883 (patch)
tree3ecc2b3ae4776fdf86c8d7c4322a8297b814754b
parent8bfcdf6671b1c8006c52c3eaf9fd1b5dfcf41c3d (diff)
netfilter: nft_reject_bridge: don't use IP stack to reject traffic
If the packet is received via the bridge stack, this cannot reject packets from the IP stack. This adds functions to build the reject packet and send it from the bridge stack. Comments and assumptions on this patch: 1) Validate the IPv4 and IPv6 headers before further processing, given that the packet comes from the bridge stack, we cannot assume they are clean. Truncated packets are dropped; we follow a similar approach in the existing iptables match/target extensions that need to inspect layer 4 headers, which may not be available. This also includes packets that are directed to multicast and broadcast ethernet addresses. 2) br_deliver() is exported to inject the reject packet via bridge localout -> postrouting. So the approach is similar to what we already do in the iptables reject target. The reject packet is sent to the bridge port from which we have received the original packet. 3) The reject packet is forged based on the original packet. The TTL is set based on sysctl_ip_default_ttl for IPv4 and the per-net ipv6.devconf_all hoplimit for IPv6. Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
-rw-r--r--net/bridge/br_forward.c1
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c263
2 files changed, 254 insertions, 10 deletions
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 992ec49a96aa..44cb786b925a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
112 112
113 kfree_skb(skb); 113 kfree_skb(skb);
114} 114}
115EXPORT_SYMBOL_GPL(br_deliver);
115 116
116/* called with rcu_read_lock */ 117/* called with rcu_read_lock */
117void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) 118void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index a76479535df2..31b27e1bab9f 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -16,6 +16,237 @@
16#include <net/netfilter/nft_reject.h> 16#include <net/netfilter/nft_reject.h>
17#include <net/netfilter/ipv4/nf_reject.h> 17#include <net/netfilter/ipv4/nf_reject.h>
18#include <net/netfilter/ipv6/nf_reject.h> 18#include <net/netfilter/ipv6/nf_reject.h>
19#include <linux/ip.h>
20#include <net/ip.h>
21#include "../br_private.h"
22
23static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
24 struct sk_buff *nskb)
25{
26 struct ethhdr *eth;
27
28 eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
29 skb_reset_mac_header(nskb);
30 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
31 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
32 eth->h_proto = eth_hdr(oldskb)->h_proto;
33 skb_pull(nskb, ETH_HLEN);
34}
35
36static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
37{
38 struct iphdr *iph;
39 u32 len;
40
41 if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
42 return 0;
43
44 iph = ip_hdr(oldskb);
45 if (iph->ihl < 5 || iph->version != 4)
46 return 0;
47
48 len = ntohs(iph->tot_len);
49 if (oldskb->len < len)
50 return 0;
51 else if (len < (iph->ihl*4))
52 return 0;
53
54 if (!pskb_may_pull(oldskb, iph->ihl*4))
55 return 0;
56
57 return 1;
58}
59
60static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
61{
62 struct sk_buff *nskb;
63 struct iphdr *niph;
64 const struct tcphdr *oth;
65 struct tcphdr _oth;
66
67 if (!nft_reject_iphdr_validate(oldskb))
68 return;
69
70 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
71 if (!oth)
72 return;
73
74 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
75 LL_MAX_HEADER, GFP_ATOMIC);
76 if (!nskb)
77 return;
78
79 skb_reserve(nskb, LL_MAX_HEADER);
80 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
81 sysctl_ip_default_ttl);
82 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
83 niph->ttl = sysctl_ip_default_ttl;
84 niph->tot_len = htons(nskb->len);
85 ip_send_check(niph);
86
87 nft_reject_br_push_etherhdr(oldskb, nskb);
88
89 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
90}
91
92static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
93 u8 code)
94{
95 struct sk_buff *nskb;
96 struct iphdr *niph;
97 struct icmphdr *icmph;
98 unsigned int len;
99 void *payload;
100 __wsum csum;
101
102 if (!nft_reject_iphdr_validate(oldskb))
103 return;
104
105 /* IP header checks: fragment. */
106 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
107 return;
108
109 /* RFC says return as much as we can without exceeding 576 bytes. */
110 len = min_t(unsigned int, 536, oldskb->len);
111
112 if (!pskb_may_pull(oldskb, len))
113 return;
114
115 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
116 return;
117
118 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
119 LL_MAX_HEADER + len, GFP_ATOMIC);
120 if (!nskb)
121 return;
122
123 skb_reserve(nskb, LL_MAX_HEADER);
124 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
125 sysctl_ip_default_ttl);
126
127 skb_reset_transport_header(nskb);
128 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
129 memset(icmph, 0, sizeof(*icmph));
130 icmph->type = ICMP_DEST_UNREACH;
131 icmph->code = code;
132
133 payload = skb_put(nskb, len);
134 memcpy(payload, skb_network_header(oldskb), len);
135
136 csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
137 icmph->checksum = csum_fold(csum);
138
139 niph->tot_len = htons(nskb->len);
140 ip_send_check(niph);
141
142 nft_reject_br_push_etherhdr(oldskb, nskb);
143
144 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
145}
146
147static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
148{
149 struct ipv6hdr *hdr;
150 u32 pkt_len;
151
152 if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
153 return 0;
154
155 hdr = ipv6_hdr(oldskb);
156 if (hdr->version != 6)
157 return 0;
158
159 pkt_len = ntohs(hdr->payload_len);
160 if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
161 return 0;
162
163 return 1;
164}
165
166static void nft_reject_br_send_v6_tcp_reset(struct net *net,
167 struct sk_buff *oldskb, int hook)
168{
169 struct sk_buff *nskb;
170 const struct tcphdr *oth;
171 struct tcphdr _oth;
172 unsigned int otcplen;
173 struct ipv6hdr *nip6h;
174
175 if (!nft_reject_ip6hdr_validate(oldskb))
176 return;
177
178 oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
179 if (!oth)
180 return;
181
182 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
183 LL_MAX_HEADER, GFP_ATOMIC);
184 if (!nskb)
185 return;
186
187 skb_reserve(nskb, LL_MAX_HEADER);
188 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
189 net->ipv6.devconf_all->hop_limit);
190 nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
191 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
192
193 nft_reject_br_push_etherhdr(oldskb, nskb);
194
195 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
196}
197
198static void nft_reject_br_send_v6_unreach(struct net *net,
199 struct sk_buff *oldskb, int hook,
200 u8 code)
201{
202 struct sk_buff *nskb;
203 struct ipv6hdr *nip6h;
204 struct icmp6hdr *icmp6h;
205 unsigned int len;
206 void *payload;
207
208 if (!nft_reject_ip6hdr_validate(oldskb))
209 return;
210
211 /* Include "As much of invoking packet as possible without the ICMPv6
212 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
213 */
214 len = min_t(unsigned int, 1220, oldskb->len);
215
216 if (!pskb_may_pull(oldskb, len))
217 return;
218
219 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
220 LL_MAX_HEADER + len, GFP_ATOMIC);
221 if (!nskb)
222 return;
223
224 skb_reserve(nskb, LL_MAX_HEADER);
225 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
226 net->ipv6.devconf_all->hop_limit);
227
228 skb_reset_transport_header(nskb);
229 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
230 memset(icmp6h, 0, sizeof(*icmp6h));
231 icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
232 icmp6h->icmp6_code = code;
233
234 payload = skb_put(nskb, len);
235 memcpy(payload, skb_network_header(oldskb), len);
236 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
237
238 icmp6h->icmp6_cksum =
239 csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
240 nskb->len - sizeof(struct ipv6hdr),
241 IPPROTO_ICMPV6,
242 csum_partial(icmp6h,
243 nskb->len - sizeof(struct ipv6hdr),
244 0));
245
246 nft_reject_br_push_etherhdr(oldskb, nskb);
247
248 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
249}
19 250
20static void nft_reject_bridge_eval(const struct nft_expr *expr, 251static void nft_reject_bridge_eval(const struct nft_expr *expr,
21 struct nft_data data[NFT_REG_MAX + 1], 252 struct nft_data data[NFT_REG_MAX + 1],
@@ -23,35 +254,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
23{ 254{
24 struct nft_reject *priv = nft_expr_priv(expr); 255 struct nft_reject *priv = nft_expr_priv(expr);
25 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); 256 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
257 const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
258
259 if (is_broadcast_ether_addr(dest) ||
260 is_multicast_ether_addr(dest))
261 goto out;
26 262
27 switch (eth_hdr(pkt->skb)->h_proto) { 263 switch (eth_hdr(pkt->skb)->h_proto) {
28 case htons(ETH_P_IP): 264 case htons(ETH_P_IP):
29 switch (priv->type) { 265 switch (priv->type) {
30 case NFT_REJECT_ICMP_UNREACH: 266 case NFT_REJECT_ICMP_UNREACH:
31 nf_send_unreach(pkt->skb, priv->icmp_code); 267 nft_reject_br_send_v4_unreach(pkt->skb,
268 pkt->ops->hooknum,
269 priv->icmp_code);
32 break; 270 break;
33 case NFT_REJECT_TCP_RST: 271 case NFT_REJECT_TCP_RST:
34 nf_send_reset(pkt->skb, pkt->ops->hooknum); 272 nft_reject_br_send_v4_tcp_reset(pkt->skb,
273 pkt->ops->hooknum);
35 break; 274 break;
36 case NFT_REJECT_ICMPX_UNREACH: 275 case NFT_REJECT_ICMPX_UNREACH:
37 nf_send_unreach(pkt->skb, 276 nft_reject_br_send_v4_unreach(pkt->skb,
38 nft_reject_icmp_code(priv->icmp_code)); 277 pkt->ops->hooknum,
278 nft_reject_icmp_code(priv->icmp_code));
39 break; 279 break;
40 } 280 }
41 break; 281 break;
42 case htons(ETH_P_IPV6): 282 case htons(ETH_P_IPV6):
43 switch (priv->type) { 283 switch (priv->type) {
44 case NFT_REJECT_ICMP_UNREACH: 284 case NFT_REJECT_ICMP_UNREACH:
45 nf_send_unreach6(net, pkt->skb, priv->icmp_code, 285 nft_reject_br_send_v6_unreach(net, pkt->skb,
46 pkt->ops->hooknum); 286 pkt->ops->hooknum,
287 priv->icmp_code);
47 break; 288 break;
48 case NFT_REJECT_TCP_RST: 289 case NFT_REJECT_TCP_RST:
49 nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); 290 nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
291 pkt->ops->hooknum);
50 break; 292 break;
51 case NFT_REJECT_ICMPX_UNREACH: 293 case NFT_REJECT_ICMPX_UNREACH:
52 nf_send_unreach6(net, pkt->skb, 294 nft_reject_br_send_v6_unreach(net, pkt->skb,
53 nft_reject_icmpv6_code(priv->icmp_code), 295 pkt->ops->hooknum,
54 pkt->ops->hooknum); 296 nft_reject_icmpv6_code(priv->icmp_code));
55 break; 297 break;
56 } 298 }
57 break; 299 break;
@@ -59,6 +301,7 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
59 /* No explicit way to reject this protocol, drop it. */ 301 /* No explicit way to reject this protocol, drop it. */
60 break; 302 break;
61 } 303 }
304out:
62 data[NFT_REG_VERDICT].verdict = NF_DROP; 305 data[NFT_REG_VERDICT].verdict = NF_DROP;
63} 306}
64 307