aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-10-31 12:29:42 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-31 12:29:42 -0400
commite3a88f9c4f79a4d138a0ea464cfbac40ba46644c (patch)
treef3deeee3286b19fa6ac15d001cd1ba13fb78abf1 /net/ipv6
parentde11b0e8c569b96c2cf6a811e3805b7aeef498a3 (diff)
parent127917c29a432c3b798e014a1714e9c1af0f87fe (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says: ==================== netfilter/ipvs fixes for net The following patchset contains fixes for netfilter/ipvs. This round of fixes is larger than usual at this stage, specifically because of the nf_tables bridge reject fixes that I would like to see in 3.18. The patches are: 1) Fix a null-pointer dereference that may occur when logging errors. This problem was introduced by 4a4739d56b0 ("ipvs: Pull out crosses_local_route_boundary logic") in v3.17-rc5. 2) Update hook mask in nft_reject_bridge so we can also filter out packets from there. This fixes 36d2af5 ("netfilter: nf_tables: allow to filter from prerouting and postrouting"), which needs this chunk to work. 3) Two patches to refactor common code to forge the IPv4 and IPv6 reject packets from the bridge. These are required by the nf_tables reject bridge fix. 4) Fix nft_reject_bridge by avoiding the use of the IP stack to reject packets from the bridge. The idea is to forge the reject packets and inject them to the original port via br_deliver() which is now exported for that purpose. 5) Restrict nft_reject_bridge to bridge prerouting and input hooks. Since the original skbuff may be cloned after prerouting, when the bridge stack needs to flood it to several bridge ports it is too late to reject the traffic. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c175
1 file changed, 109 insertions(+), 66 deletions(-)
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 20d9defc6c59..015eb8a80766 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -12,116 +12,102 @@
12#include <net/ip6_fib.h> 12#include <net/ip6_fib.h>
13#include <net/ip6_checksum.h> 13#include <net/ip6_checksum.h>
14#include <linux/netfilter_ipv6.h> 14#include <linux/netfilter_ipv6.h>
15#include <net/netfilter/ipv6/nf_reject.h>
15 16
16void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) 17const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
18 struct tcphdr *otcph,
19 unsigned int *otcplen, int hook)
17{ 20{
18 struct sk_buff *nskb;
19 struct tcphdr otcph, *tcph;
20 unsigned int otcplen, hh_len;
21 int tcphoff, needs_ack;
22 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); 21 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
23 struct ipv6hdr *ip6h;
24#define DEFAULT_TOS_VALUE 0x0U
25 const __u8 tclass = DEFAULT_TOS_VALUE;
26 struct dst_entry *dst = NULL;
27 u8 proto; 22 u8 proto;
28 __be16 frag_off; 23 __be16 frag_off;
29 struct flowi6 fl6; 24 int tcphoff;
30
31 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
32 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
33 pr_debug("addr is not unicast.\n");
34 return;
35 }
36 25
37 proto = oip6h->nexthdr; 26 proto = oip6h->nexthdr;
38 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); 27 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
28 &proto, &frag_off);
39 29
40 if ((tcphoff < 0) || (tcphoff > oldskb->len)) { 30 if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
41 pr_debug("Cannot get TCP header.\n"); 31 pr_debug("Cannot get TCP header.\n");
42 return; 32 return NULL;
43 } 33 }
44 34
45 otcplen = oldskb->len - tcphoff; 35 *otcplen = oldskb->len - tcphoff;
46 36
47 /* IP header checks: fragment, too short. */ 37 /* IP header checks: fragment, too short. */
48 if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { 38 if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) {
49 pr_debug("proto(%d) != IPPROTO_TCP, " 39 pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n",
50 "or too short. otcplen = %d\n", 40 proto, *otcplen);
51 proto, otcplen); 41 return NULL;
52 return;
53 } 42 }
54 43
55 if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) 44 otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr),
56 BUG(); 45 otcph);
46 if (otcph == NULL)
47 return NULL;
57 48
58 /* No RST for RST. */ 49 /* No RST for RST. */
59 if (otcph.rst) { 50 if (otcph->rst) {
60 pr_debug("RST is set\n"); 51 pr_debug("RST is set\n");
61 return; 52 return NULL;
62 } 53 }
63 54
64 /* Check checksum. */ 55 /* Check checksum. */
65 if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { 56 if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
66 pr_debug("TCP checksum is invalid\n"); 57 pr_debug("TCP checksum is invalid\n");
67 return; 58 return NULL;
68 }
69
70 memset(&fl6, 0, sizeof(fl6));
71 fl6.flowi6_proto = IPPROTO_TCP;
72 fl6.saddr = oip6h->daddr;
73 fl6.daddr = oip6h->saddr;
74 fl6.fl6_sport = otcph.dest;
75 fl6.fl6_dport = otcph.source;
76 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
77 dst = ip6_route_output(net, NULL, &fl6);
78 if (dst == NULL || dst->error) {
79 dst_release(dst);
80 return;
81 }
82 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
83 if (IS_ERR(dst))
84 return;
85
86 hh_len = (dst->dev->hard_header_len + 15)&~15;
87 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
88 + sizeof(struct tcphdr) + dst->trailer_len,
89 GFP_ATOMIC);
90
91 if (!nskb) {
92 net_dbg_ratelimited("cannot alloc skb\n");
93 dst_release(dst);
94 return;
95 } 59 }
96 60
97 skb_dst_set(nskb, dst); 61 return otcph;
62}
63EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
98 64
99 skb_reserve(nskb, hh_len + dst->header_len); 65struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
66 const struct sk_buff *oldskb,
67 __be16 protocol, int hoplimit)
68{
69 struct ipv6hdr *ip6h;
70 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
71#define DEFAULT_TOS_VALUE 0x0U
72 const __u8 tclass = DEFAULT_TOS_VALUE;
100 73
101 skb_put(nskb, sizeof(struct ipv6hdr)); 74 skb_put(nskb, sizeof(struct ipv6hdr));
102 skb_reset_network_header(nskb); 75 skb_reset_network_header(nskb);
103 ip6h = ipv6_hdr(nskb); 76 ip6h = ipv6_hdr(nskb);
104 ip6_flow_hdr(ip6h, tclass, 0); 77 ip6_flow_hdr(ip6h, tclass, 0);
105 ip6h->hop_limit = ip6_dst_hoplimit(dst); 78 ip6h->hop_limit = hoplimit;
106 ip6h->nexthdr = IPPROTO_TCP; 79 ip6h->nexthdr = protocol;
107 ip6h->saddr = oip6h->daddr; 80 ip6h->saddr = oip6h->daddr;
108 ip6h->daddr = oip6h->saddr; 81 ip6h->daddr = oip6h->saddr;
109 82
83 nskb->protocol = htons(ETH_P_IPV6);
84
85 return ip6h;
86}
87EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);
88
89void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
90 const struct sk_buff *oldskb,
91 const struct tcphdr *oth, unsigned int otcplen)
92{
93 struct tcphdr *tcph;
94 int needs_ack;
95
110 skb_reset_transport_header(nskb); 96 skb_reset_transport_header(nskb);
111 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 97 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
112 /* Truncate to length (no data) */ 98 /* Truncate to length (no data) */
113 tcph->doff = sizeof(struct tcphdr)/4; 99 tcph->doff = sizeof(struct tcphdr)/4;
114 tcph->source = otcph.dest; 100 tcph->source = oth->dest;
115 tcph->dest = otcph.source; 101 tcph->dest = oth->source;
116 102
117 if (otcph.ack) { 103 if (oth->ack) {
118 needs_ack = 0; 104 needs_ack = 0;
119 tcph->seq = otcph.ack_seq; 105 tcph->seq = oth->ack_seq;
120 tcph->ack_seq = 0; 106 tcph->ack_seq = 0;
121 } else { 107 } else {
122 needs_ack = 1; 108 needs_ack = 1;
123 tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin 109 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
124 + otcplen - (otcph.doff<<2)); 110 otcplen - (oth->doff<<2));
125 tcph->seq = 0; 111 tcph->seq = 0;
126 } 112 }
127 113
@@ -139,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
139 sizeof(struct tcphdr), IPPROTO_TCP, 125 sizeof(struct tcphdr), IPPROTO_TCP,
140 csum_partial(tcph, 126 csum_partial(tcph,
141 sizeof(struct tcphdr), 0)); 127 sizeof(struct tcphdr), 0));
128}
129EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
130
131void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
132{
133 struct sk_buff *nskb;
134 struct tcphdr _otcph;
135 const struct tcphdr *otcph;
136 unsigned int otcplen, hh_len;
137 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
138 struct ipv6hdr *ip6h;
139 struct dst_entry *dst = NULL;
140 struct flowi6 fl6;
141
142 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
143 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
144 pr_debug("addr is not unicast.\n");
145 return;
146 }
147
148 otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
149 if (!otcph)
150 return;
151
152 memset(&fl6, 0, sizeof(fl6));
153 fl6.flowi6_proto = IPPROTO_TCP;
154 fl6.saddr = oip6h->daddr;
155 fl6.daddr = oip6h->saddr;
156 fl6.fl6_sport = otcph->dest;
157 fl6.fl6_dport = otcph->source;
158 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
159 dst = ip6_route_output(net, NULL, &fl6);
160 if (dst == NULL || dst->error) {
161 dst_release(dst);
162 return;
163 }
164 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
165 if (IS_ERR(dst))
166 return;
167
168 hh_len = (dst->dev->hard_header_len + 15)&~15;
169 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
170 + sizeof(struct tcphdr) + dst->trailer_len,
171 GFP_ATOMIC);
172
173 if (!nskb) {
174 net_dbg_ratelimited("cannot alloc skb\n");
175 dst_release(dst);
176 return;
177 }
178
179 skb_dst_set(nskb, dst);
180
181 skb_reserve(nskb, hh_len + dst->header_len);
182 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
183 ip6_dst_hoplimit(dst));
184 nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
142 185
143 nf_ct_attach(nskb, oldskb); 186 nf_ct_attach(nskb, oldskb);
144 187