aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorxeb@mail.ru <xeb@mail.ru>2012-08-09 20:51:50 -0400
committerDavid S. Miller <davem@davemloft.net>2012-08-14 17:28:32 -0400
commitc12b395a46646bab69089ce7016ac78177f6001f (patch)
treeec3bbef6602f42853b3dad5ae36c7d3872fa56eb
parentb7bc2a5b5bd99b216c3e5fe68c7f45c684ab5745 (diff)
gre: Support GRE over IPv6
GRE over IPv6 implementation. Signed-off-by: Dmitry Kozlov <xeb@mail.ru> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/if_tunnel.h3
-rw-r--r--include/linux/ip6_tunnel.h17
-rw-r--r--include/net/ip6_tunnel.h41
-rw-r--r--include/net/ipv6.h1
-rw-r--r--net/ipv6/Kconfig16
-rw-r--r--net/ipv6/Makefile1
-rw-r--r--net/ipv6/ip6_gre.c1790
-rw-r--r--net/ipv6/ip6_tunnel.c89
9 files changed, 1933 insertions, 26 deletions
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index f0e69c6e8208..9adcc29f084a 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -92,6 +92,7 @@
92#define ARPHRD_PHONET 820 /* PhoNet media type */ 92#define ARPHRD_PHONET 820 /* PhoNet media type */
93#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ 93#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
94#define ARPHRD_CAIF 822 /* CAIF media type */ 94#define ARPHRD_CAIF 822 /* CAIF media type */
95#define ARPHRD_IP6GRE 823 /* GRE over IPv6 */
95 96
96#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ 97#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
97#define ARPHRD_NONE 0xFFFE /* zero header length */ 98#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 5efff60b6f56..8c5035ac3142 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -75,6 +75,9 @@ enum {
75 IFLA_GRE_TTL, 75 IFLA_GRE_TTL,
76 IFLA_GRE_TOS, 76 IFLA_GRE_TOS,
77 IFLA_GRE_PMTUDISC, 77 IFLA_GRE_PMTUDISC,
78 IFLA_GRE_ENCAP_LIMIT,
79 IFLA_GRE_FLOWINFO,
80 IFLA_GRE_FLAGS,
78 __IFLA_GRE_MAX, 81 __IFLA_GRE_MAX,
79}; 82};
80 83
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index bf22b0317902..48af63c9a48d 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -31,4 +31,21 @@ struct ip6_tnl_parm {
31 struct in6_addr raddr; /* remote tunnel end-point address */ 31 struct in6_addr raddr; /* remote tunnel end-point address */
32}; 32};
33 33
34struct ip6_tnl_parm2 {
35 char name[IFNAMSIZ]; /* name of tunnel device */
36 int link; /* ifindex of underlying L2 interface */
37 __u8 proto; /* tunnel protocol */
38 __u8 encap_limit; /* encapsulation limit for tunnel */
39 __u8 hop_limit; /* hop limit for tunnel */
40 __be32 flowinfo; /* traffic class and flowlabel for tunnel */
41 __u32 flags; /* tunnel flags */
42 struct in6_addr laddr; /* local tunnel end-point address */
43 struct in6_addr raddr; /* remote tunnel end-point address */
44
45 __be16 i_flags;
46 __be16 o_flags;
47 __be32 i_key;
48 __be32 o_key;
49};
50
34#endif 51#endif
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 358fb86f57eb..e03047f7090b 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -5,6 +5,8 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/ip6_tunnel.h> 6#include <linux/ip6_tunnel.h>
7 7
8#define IP6TUNNEL_ERR_TIMEO (30*HZ)
9
8/* capable of sending packets */ 10/* capable of sending packets */
9#define IP6_TNL_F_CAP_XMIT 0x10000 11#define IP6_TNL_F_CAP_XMIT 0x10000
10/* capable of receiving packets */ 12/* capable of receiving packets */
@@ -12,15 +14,40 @@
12/* determine capability on a per-packet basis */ 14/* determine capability on a per-packet basis */
13#define IP6_TNL_F_CAP_PER_PACKET 0x40000 15#define IP6_TNL_F_CAP_PER_PACKET 0x40000
14 16
15/* IPv6 tunnel */ 17struct __ip6_tnl_parm {
18 char name[IFNAMSIZ]; /* name of tunnel device */
19 int link; /* ifindex of underlying L2 interface */
20 __u8 proto; /* tunnel protocol */
21 __u8 encap_limit; /* encapsulation limit for tunnel */
22 __u8 hop_limit; /* hop limit for tunnel */
23 __be32 flowinfo; /* traffic class and flowlabel for tunnel */
24 __u32 flags; /* tunnel flags */
25 struct in6_addr laddr; /* local tunnel end-point address */
26 struct in6_addr raddr; /* remote tunnel end-point address */
27
28 __be16 i_flags;
29 __be16 o_flags;
30 __be32 i_key;
31 __be32 o_key;
32};
16 33
34/* IPv6 tunnel */
17struct ip6_tnl { 35struct ip6_tnl {
18 struct ip6_tnl __rcu *next; /* next tunnel in list */ 36 struct ip6_tnl __rcu *next; /* next tunnel in list */
19 struct net_device *dev; /* virtual device associated with tunnel */ 37 struct net_device *dev; /* virtual device associated with tunnel */
20 struct ip6_tnl_parm parms; /* tunnel configuration parameters */ 38 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
21 struct flowi fl; /* flowi template for xmit */ 39 struct flowi fl; /* flowi template for xmit */
22 struct dst_entry *dst_cache; /* cached dst */ 40 struct dst_entry *dst_cache; /* cached dst */
23 u32 dst_cookie; 41 u32 dst_cookie;
42
43 int err_count;
44 unsigned long err_time;
45
46 /* These fields used only by GRE */
47 __u32 i_seqno; /* The last seen seqno */
48 __u32 o_seqno; /* The last output seqno */
49 int hlen; /* Precalculated GRE header length */
50 int mlink;
24}; 51};
25 52
26/* Tunnel encapsulation limit destination sub-option */ 53/* Tunnel encapsulation limit destination sub-option */
@@ -31,4 +58,14 @@ struct ipv6_tlv_tnl_enc_lim {
31 __u8 encap_limit; /* tunnel encapsulation limit */ 58 __u8 encap_limit; /* tunnel encapsulation limit */
32} __packed; 59} __packed;
33 60
61struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
62void ip6_tnl_dst_reset(struct ip6_tnl *t);
63void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
64int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
65 const struct in6_addr *raddr);
66int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
67__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
68__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
69 const struct in6_addr *raddr);
70
34#endif 71#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 01c34b363a34..6d01fb00ff2b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -34,6 +34,7 @@
34#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */ 34#define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */
35#define NEXTHDR_ROUTING 43 /* Routing header. */ 35#define NEXTHDR_ROUTING 43 /* Routing header. */
36#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */ 36#define NEXTHDR_FRAGMENT 44 /* Fragmentation/reassembly header. */
37#define NEXTHDR_GRE 47 /* GRE header. */
37#define NEXTHDR_ESP 50 /* Encapsulating security payload. */ 38#define NEXTHDR_ESP 50 /* Encapsulating security payload. */
38#define NEXTHDR_AUTH 51 /* Authentication header. */ 39#define NEXTHDR_AUTH 51 /* Authentication header. */
39#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */ 40#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 5728695b5449..4f7fe7270e37 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -201,6 +201,22 @@ config IPV6_TUNNEL
201 201
202 If unsure, say N. 202 If unsure, say N.
203 203
204config IPV6_GRE
205 tristate "IPv6: GRE tunnel"
206 select IPV6_TUNNEL
207 ---help---
208 Tunneling means encapsulating data of one protocol type within
209 another protocol and sending it over a channel that understands the
210 encapsulating protocol. This particular tunneling driver implements
211 GRE (Generic Routing Encapsulation) and at this time allows
212 encapsulating of IPv4 or IPv6 over existing IPv6 infrastructure.
213 This driver is useful if the other endpoint is a Cisco router: Cisco
214 likes GRE much better than the other Linux tunneling driver ("IP
215 tunneling" above). In addition, GRE allows multicast redistribution
216 through the tunnel.
217
218 Saying M here will produce a module called ip6_gre. If unsure, say N.
219
204config IPV6_MULTIPLE_TABLES 220config IPV6_MULTIPLE_TABLES
205 bool "IPv6: Multiple Routing Tables" 221 bool "IPv6: Multiple Routing Tables"
206 depends on EXPERIMENTAL 222 depends on EXPERIMENTAL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 686934acfac1..b6d3f79151e2 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
36 36
37obj-$(CONFIG_IPV6_SIT) += sit.o 37obj-$(CONFIG_IPV6_SIT) += sit.o
38obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 38obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
39obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
39 40
40obj-y += addrconf_core.o exthdrs_core.o 41obj-y += addrconf_core.o exthdrs_core.o
41 42
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
new file mode 100644
index 000000000000..a84ad5dc4fcf
--- /dev/null
+++ b/net/ipv6/ip6_gre.c
@@ -0,0 +1,1790 @@
1/*
2 * GRE over IPv6 protocol decoder.
3 *
4 * Authors: Dmitry Kozlov (xeb@mail.ru)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/capability.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/uaccess.h>
21#include <linux/skbuff.h>
22#include <linux/netdevice.h>
23#include <linux/in.h>
24#include <linux/tcp.h>
25#include <linux/udp.h>
26#include <linux/if_arp.h>
27#include <linux/mroute.h>
28#include <linux/init.h>
29#include <linux/in6.h>
30#include <linux/inetdevice.h>
31#include <linux/igmp.h>
32#include <linux/netfilter_ipv4.h>
33#include <linux/etherdevice.h>
34#include <linux/if_ether.h>
35#include <linux/hash.h>
36#include <linux/if_tunnel.h>
37#include <linux/ip6_tunnel.h>
38
39#include <net/sock.h>
40#include <net/ip.h>
41#include <net/icmp.h>
42#include <net/protocol.h>
43#include <net/addrconf.h>
44#include <net/arp.h>
45#include <net/checksum.h>
46#include <net/dsfield.h>
47#include <net/inet_ecn.h>
48#include <net/xfrm.h>
49#include <net/net_namespace.h>
50#include <net/netns/generic.h>
51#include <net/rtnetlink.h>
52
53#include <net/ipv6.h>
54#include <net/ip6_fib.h>
55#include <net/ip6_route.h>
56#include <net/ip6_tunnel.h>
57
58
59#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
60#define IPV6_TCLASS_SHIFT 20
61
62#define HASH_SIZE_SHIFT 5
63#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
64
65static int ip6gre_net_id __read_mostly;
/* Per-network-namespace state for the ip6gre module. */
struct ip6gre_net {
	/* Hash buckets indexed by which of (remote, local) are wildcarded
	 * (see the table comment below); writers hold RTNL, readers RCU. */
	struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];

	/* Fallback device that receives keyless packets matching no
	 * configured tunnel. */
	struct net_device *fb_tunnel_dev;
};
71
72static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
73static int ip6gre_tunnel_init(struct net_device *dev);
74static void ip6gre_tunnel_setup(struct net_device *dev);
75static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
76static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
77
78/* Tunnel hash table */
79
80/*
81 4 hash tables:
82
83 3: (remote,local)
84 2: (remote,*)
85 1: (*,local)
86 0: (*,*)
87
88 We require exact key match i.e. if a key is present in packet
89 it will match only tunnel with the same key; if it is not present,
90 it will match only keyless tunnel.
91
 92 All keyless packets, if they do not match a configured keyless tunnel,
 93 will match the fallback tunnel.
94 */
95
96#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
97static u32 HASH_ADDR(const struct in6_addr *addr)
98{
99 u32 hash = ipv6_addr_hash(addr);
100
101 return hash_32(hash, HASH_SIZE_SHIFT);
102}
103
104#define tunnels_r_l tunnels[3]
105#define tunnels_r tunnels[2]
106#define tunnels_l tunnels[1]
107#define tunnels_wc tunnels[0]
108/*
109 * Locking : hash tables are protected by RCU and RTNL
110 */
111
112#define for_each_ip_tunnel_rcu(start) \
113 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
114
/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	u64	rx_packets;	/* packets received on the tunnel */
	u64	rx_bytes;	/* bytes received on the tunnel */
	u64	tx_packets;	/* packets transmitted through the tunnel */
	u64	tx_bytes;	/* bytes transmitted through the tunnel */
	/* Makes the 64-bit counters readable consistently on 32-bit hosts. */
	struct u64_stats_sync	syncp;
};
123
124static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
125 struct rtnl_link_stats64 *tot)
126{
127 int i;
128
129 for_each_possible_cpu(i) {
130 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
131 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
132 unsigned int start;
133
134 do {
135 start = u64_stats_fetch_begin_bh(&tstats->syncp);
136 rx_packets = tstats->rx_packets;
137 tx_packets = tstats->tx_packets;
138 rx_bytes = tstats->rx_bytes;
139 tx_bytes = tstats->tx_bytes;
140 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
141
142 tot->rx_packets += rx_packets;
143 tot->tx_packets += tx_packets;
144 tot->rx_bytes += rx_bytes;
145 tot->tx_bytes += tx_bytes;
146 }
147
148 tot->multicast = dev->stats.multicast;
149 tot->rx_crc_errors = dev->stats.rx_crc_errors;
150 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
151 tot->rx_length_errors = dev->stats.rx_length_errors;
152 tot->rx_errors = dev->stats.rx_errors;
153 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
154 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
155 tot->tx_dropped = dev->stats.tx_dropped;
156 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
157 tot->tx_errors = dev->stats.tx_errors;
158
159 return tot;
160}
161
/* Given src, dst and key, find appropriate for input tunnel.
 *
 * Four passes over the hash tables, most-specific first:
 *   (remote,local) -> (remote,*) -> (*,local or multicast) -> (*,*).
 * Within a pass, a tunnel scores 0 when both its link and device type
 * match; mismatches set bit 0 (link) / bit 1 (type), and the lowest
 * scoring candidate across all passes wins.  Runs under RCU.
 */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	/* ETH_P_TEB payloads belong to gretap (Ethernet) devices. */
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;

	/* Pass 1: fully specified (remote, local) tunnels. */
	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* Pass 2: remote-only tunnels. */
	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* Pass 3: local-only tunnels; a multicast destination may also
	 * match a tunnel whose *remote* is that multicast group. */
	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
		     (!ipv6_addr_equal(local, &t->parms.raddr) ||
		      !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* Pass 4: wildcard tunnels, matched on key alone. */
	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	/* Last resort: the per-namespace fallback device, if it is up. */
	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
285
286static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
287 const struct __ip6_tnl_parm *p)
288{
289 const struct in6_addr *remote = &p->raddr;
290 const struct in6_addr *local = &p->laddr;
291 unsigned int h = HASH_KEY(p->i_key);
292 int prio = 0;
293
294 if (!ipv6_addr_any(local))
295 prio |= 1;
296 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
297 prio |= 2;
298 h ^= HASH_ADDR(remote);
299 }
300
301 return &ign->tunnels[prio][h];
302}
303
304static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
305 const struct ip6_tnl *t)
306{
307 return __ip6gre_bucket(ign, &t->parms);
308}
309
/* Insert @t at the head of its hash bucket.  Caller holds RTNL. */
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	/* Publish order matters for lockless readers: t->next must be
	 * valid before *tp makes t reachable. */
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
317
/* Remove @t from its hash chain.  Caller holds RTNL; concurrent RCU
 * readers keep a consistent view because only the predecessor's next
 * pointer is rewritten. */
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
332
333static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
334 const struct __ip6_tnl_parm *parms,
335 int type)
336{
337 const struct in6_addr *remote = &parms->raddr;
338 const struct in6_addr *local = &parms->laddr;
339 __be32 key = parms->i_key;
340 int link = parms->link;
341 struct ip6_tnl *t;
342 struct ip6_tnl __rcu **tp;
343 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
344
345 for (tp = __ip6gre_bucket(ign, parms);
346 (t = rtnl_dereference(*tp)) != NULL;
347 tp = &t->next)
348 if (ipv6_addr_equal(local, &t->parms.laddr) &&
349 ipv6_addr_equal(remote, &t->parms.raddr) &&
350 key == t->parms.i_key &&
351 link == t->parms.link &&
352 type == t->dev->type)
353 break;
354
355 return t;
356}
357
/* Find a tunnel matching @parms; when @create is set and none exists,
 * allocate, configure, register and hash a new ip6gre device.
 * Caller holds RTNL.  Returns the tunnel, or NULL on allocation /
 * registration failure (and when not found with !create). */
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t || !create)
		return t;

	/* Use the requested name, or let the core pick "ip6greN". */
	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "ip6gre%d");

	dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	ip6gre_tnl_link_config(nt, 1);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	/* Reference dropped in ip6gre_tunnel_uninit(). */
	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
403
/* ndo_uninit: unhash the tunnel and drop the reference taken when it
 * was linked (see dev_hold() in ip6gre_tunnel_locate()). */
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
412
413
414static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
415 u8 type, u8 code, int offset, __be32 info)
416{
417 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
418 __be16 *p = (__be16 *)(ipv6h + 1);
419 int grehlen = sizeof(ipv6h) + 4;
420 struct ip6_tnl *t;
421 __be16 flags;
422
423 flags = p[0];
424 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
425 if (flags&(GRE_VERSION|GRE_ROUTING))
426 return;
427 if (flags&GRE_KEY) {
428 grehlen += 4;
429 if (flags&GRE_CSUM)
430 grehlen += 4;
431 }
432 }
433
434 /* If only 8 bytes returned, keyed message will be dropped here */
435 if (skb_headlen(skb) < grehlen)
436 return;
437
438 rcu_read_lock();
439
440 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
441 flags & GRE_KEY ?
442 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
443 p[1]);
444 if (t == NULL)
445 goto out;
446
447 switch (type) {
448 __u32 teli;
449 struct ipv6_tlv_tnl_enc_lim *tel;
450 __u32 mtu;
451 case ICMPV6_DEST_UNREACH:
452 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
453 t->parms.name);
454 break;
455 case ICMPV6_TIME_EXCEED:
456 if (code == ICMPV6_EXC_HOPLIMIT) {
457 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
458 t->parms.name);
459 }
460 break;
461 case ICMPV6_PARAMPROB:
462 teli = 0;
463 if (code == ICMPV6_HDR_FIELD)
464 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
465
466 if (teli && teli == info - 2) {
467 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
468 if (tel->encap_limit == 0) {
469 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
470 t->parms.name);
471 }
472 } else {
473 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
474 t->parms.name);
475 }
476 break;
477 case ICMPV6_PKT_TOOBIG:
478 mtu = info - offset;
479 if (mtu < IPV6_MIN_MTU)
480 mtu = IPV6_MIN_MTU;
481 t->dev->mtu = mtu;
482 break;
483 }
484
485 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
486 t->err_count++;
487 else
488 t->err_count = 1;
489 t->err_time = jiffies;
490out:
491 rcu_read_unlock();
492}
493
494static inline void ip6gre_ecn_decapsulate_ipv4(const struct ip6_tnl *t,
495 const struct ipv6hdr *ipv6h, struct sk_buff *skb)
496{
497 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
498
499 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
500 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
501
502 if (INET_ECN_is_ce(dsfield))
503 IP_ECN_set_ce(ip_hdr(skb));
504}
505
506static inline void ip6gre_ecn_decapsulate_ipv6(const struct ip6_tnl *t,
507 const struct ipv6hdr *ipv6h, struct sk_buff *skb)
508{
509 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
510 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
511
512 if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
513 IP6_ECN_set_ce(ipv6_hdr(skb));
514}
515
/* GRE receive handler: parse the GRE header, verify checksum / key /
 * sequence number, find the owning tunnel, strip the encapsulation and
 * hand the inner packet to the stack.  Always consumes the skb and
 * returns 0. */
static int ip6gre_rcv(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip6_tnl *tunnel;
	int    offset = 4;	/* bytes of GRE header parsed so far */
	__be16 gre_proto;

	/* NOTE(review): pulls 16 bytes (sizeof(struct in6_addr)) as a
	 * generous bound for the base GRE header — confirm this also
	 * covers the optional fields read below for short skbs. */
	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto drop_nolock;

	ipv6h = ipv6_hdr(skb);
	h = skb->data;	/* start of the GRE header */
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			/* Verify the GRE checksum over the whole packet,
			 * reusing hardware checksums when available. */
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	/* Inner protocol, right after the 2-byte flags field. */
	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	tunnel = ip6gre_tunnel_lookup(skb->dev,
					  &ipv6h->saddr, &ipv6h->daddr, key,
					  gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
			tunnel->dev->stats.rx_dropped++;
			goto drop;
		}

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* Strip the GRE header; offset now covers all parsed fields. */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;

		/* A checksum must be present iff the tunnel expects one,
		 * and when present it must verify (csum == 0). */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			/* Drop missing or out-of-order sequence numbers. */
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno &&
			     (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			/* Re-derive header pointers after the pull. */
			ipv6h = ipv6_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		if (skb->protocol == htons(ETH_P_IP))
			ip6gre_ecn_decapsulate_ipv4(tunnel, ipv6h, skb);
		else if (skb->protocol == htons(ETH_P_IPV6))
			ip6gre_ecn_decapsulate_ipv6(tunnel, ipv6h, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	/* No tunnel matched: tell the sender the GRE port is unreachable. */
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return 0;
}
658
/* Scratch ipv6_txoptions carrying a single tunnel-encapsulation-limit
 * destination option (RFC 2473, section 4.1.1). */
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];	/* raw destination-options header */
};

/* Build the 8-byte destination-options header in opt->dst_opt:
 *   [0] next header    — filled in later by ipv6_push_nfrag_opts()
 *   [1] hdr ext len    — 0, i.e. 8 bytes total
 *   [2] option type    — tunnel encapsulation limit
 *   [3] option length  — 1
 *   [4] the limit value
 *   [5..6] PadN (type, len 1) to reach the 8-byte boundary
 */
static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
677
678static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
679 struct net_device *dev,
680 __u8 dsfield,
681 struct flowi6 *fl6,
682 int encap_limit,
683 __u32 *pmtu)
684{
685 struct net *net = dev_net(dev);
686 struct ip6_tnl *tunnel = netdev_priv(dev);
687 struct net_device *tdev; /* Device to other host */
688 struct ipv6hdr *ipv6h; /* Our new IP header */
689 unsigned int max_headroom; /* The extra header space needed */
690 int gre_hlen;
691 struct ipv6_tel_txoption opt;
692 int mtu;
693 struct dst_entry *dst = NULL, *ndst = NULL;
694 struct net_device_stats *stats = &tunnel->dev->stats;
695 int err = -1;
696 u8 proto;
697 int pkt_len;
698 struct sk_buff *new_skb;
699
700 if (dev->type == ARPHRD_ETHER)
701 IPCB(skb)->flags = 0;
702
703 if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
704 gre_hlen = 0;
705 ipv6h = (struct ipv6hdr *)skb->data;
706 fl6->daddr = ipv6h->daddr;
707 } else {
708 gre_hlen = tunnel->hlen;
709 fl6->daddr = tunnel->parms.raddr;
710 }
711
712 if (!fl6->flowi6_mark)
713 dst = ip6_tnl_dst_check(tunnel);
714
715 if (!dst) {
716 ndst = ip6_route_output(net, NULL, fl6);
717
718 if (ndst->error)
719 goto tx_err_link_failure;
720 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
721 if (IS_ERR(ndst)) {
722 err = PTR_ERR(ndst);
723 ndst = NULL;
724 goto tx_err_link_failure;
725 }
726 dst = ndst;
727 }
728
729 tdev = dst->dev;
730
731 if (tdev == dev) {
732 stats->collisions++;
733 net_warn_ratelimited("%s: Local routing loop detected!\n",
734 tunnel->parms.name);
735 goto tx_err_dst_release;
736 }
737
738 mtu = dst_mtu(dst) - sizeof(*ipv6h);
739 if (encap_limit >= 0) {
740 max_headroom += 8;
741 mtu -= 8;
742 }
743 if (mtu < IPV6_MIN_MTU)
744 mtu = IPV6_MIN_MTU;
745 if (skb_dst(skb))
746 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
747 if (skb->len > mtu) {
748 *pmtu = mtu;
749 err = -EMSGSIZE;
750 goto tx_err_dst_release;
751 }
752
753 if (tunnel->err_count > 0) {
754 if (time_before(jiffies,
755 tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
756 tunnel->err_count--;
757
758 dst_link_failure(skb);
759 } else
760 tunnel->err_count = 0;
761 }
762
763 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
764
765 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
766 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
767 new_skb = skb_realloc_headroom(skb, max_headroom);
768 if (max_headroom > dev->needed_headroom)
769 dev->needed_headroom = max_headroom;
770 if (!new_skb)
771 goto tx_err_dst_release;
772
773 if (skb->sk)
774 skb_set_owner_w(new_skb, skb->sk);
775 consume_skb(skb);
776 skb = new_skb;
777 }
778
779 skb_dst_drop(skb);
780
781 if (fl6->flowi6_mark) {
782 skb_dst_set(skb, dst);
783 ndst = NULL;
784 } else {
785 skb_dst_set_noref(skb, dst);
786 }
787
788 skb->transport_header = skb->network_header;
789
790 proto = NEXTHDR_GRE;
791 if (encap_limit >= 0) {
792 init_tel_txopt(&opt, encap_limit);
793 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
794 }
795
796 skb_push(skb, gre_hlen);
797 skb_reset_network_header(skb);
798
799 /*
800 * Push down and install the IP header.
801 */
802 ipv6h = ipv6_hdr(skb);
803 *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
804 dsfield = INET_ECN_encapsulate(0, dsfield);
805 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
806 ipv6h->hop_limit = tunnel->parms.hop_limit;
807 ipv6h->nexthdr = proto;
808 ipv6h->saddr = fl6->saddr;
809 ipv6h->daddr = fl6->daddr;
810
811 ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
812 ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
813 htons(ETH_P_TEB) : skb->protocol;
814
815 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
816 __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
817
818 if (tunnel->parms.o_flags&GRE_SEQ) {
819 ++tunnel->o_seqno;
820 *ptr = htonl(tunnel->o_seqno);
821 ptr--;
822 }
823 if (tunnel->parms.o_flags&GRE_KEY) {
824 *ptr = tunnel->parms.o_key;
825 ptr--;
826 }
827 if (tunnel->parms.o_flags&GRE_CSUM) {
828 *ptr = 0;
829 *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
830 skb->len - sizeof(struct ipv6hdr));
831 }
832 }
833
834 nf_reset(skb);
835 pkt_len = skb->len;
836 err = ip6_local_out(skb);
837
838 if (net_xmit_eval(err) == 0) {
839 struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
840
841 tstats->tx_bytes += pkt_len;
842 tstats->tx_packets++;
843 } else {
844 stats->tx_errors++;
845 stats->tx_aborted_errors++;
846 }
847
848 if (ndst)
849 ip6_tnl_dst_store(tunnel, ndst);
850
851 return 0;
852tx_err_link_failure:
853 stats->tx_carrier_errors++;
854 dst_link_failure(skb);
855tx_err_dst_release:
856 dst_release(ndst);
857 return err;
858}
859
860static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
861{
862 struct ip6_tnl *t = netdev_priv(dev);
863 const struct iphdr *iph = ip_hdr(skb);
864 int encap_limit = -1;
865 struct flowi6 fl6;
866 __u8 dsfield;
867 __u32 mtu;
868 int err;
869
870 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
871 encap_limit = t->parms.encap_limit;
872
873 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
874 fl6.flowi6_proto = IPPROTO_IPIP;
875
876 dsfield = ipv4_get_dsfield(iph);
877
878 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
879 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
880 & IPV6_TCLASS_MASK;
881 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
882 fl6.flowi6_mark = skb->mark;
883
884 err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
885 if (err != 0) {
886 /* XXX: send ICMP error even if DF is not set. */
887 if (err == -EMSGSIZE)
888 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
889 htonl(mtu));
890 return -1;
891 }
892
893 return 0;
894}
895
896static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
897{
898 struct ip6_tnl *t = netdev_priv(dev);
899 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
900 int encap_limit = -1;
901 __u16 offset;
902 struct flowi6 fl6;
903 __u8 dsfield;
904 __u32 mtu;
905 int err;
906
907 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
908 return -1;
909
910 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
911 if (offset > 0) {
912 struct ipv6_tlv_tnl_enc_lim *tel;
913 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
914 if (tel->encap_limit == 0) {
915 icmpv6_send(skb, ICMPV6_PARAMPROB,
916 ICMPV6_HDR_FIELD, offset + 2);
917 return -1;
918 }
919 encap_limit = tel->encap_limit - 1;
920 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
921 encap_limit = t->parms.encap_limit;
922
923 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
924 fl6.flowi6_proto = IPPROTO_IPV6;
925
926 dsfield = ipv6_get_dsfield(ipv6h);
927 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
928 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
929 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
930 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
931 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
932 fl6.flowi6_mark = skb->mark;
933
934 err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
935 if (err != 0) {
936 if (err == -EMSGSIZE)
937 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
938 return -1;
939 }
940
941 return 0;
942}
943
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   true if conflict,
 *   false else
 **/

static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
	const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
963
964static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
965{
966 struct ip6_tnl *t = netdev_priv(dev);
967 int encap_limit = -1;
968 struct flowi6 fl6;
969 __u32 mtu;
970 int err;
971
972 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
973 encap_limit = t->parms.encap_limit;
974
975 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
976 fl6.flowi6_proto = skb->protocol;
977
978 err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
979
980 return err;
981}
982
983static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
984 struct net_device *dev)
985{
986 struct ip6_tnl *t = netdev_priv(dev);
987 struct net_device_stats *stats = &t->dev->stats;
988 int ret;
989
990 if (!ip6_tnl_xmit_ctl(t))
991 return -1;
992
993 switch (skb->protocol) {
994 case htons(ETH_P_IP):
995 ret = ip6gre_xmit_ipv4(skb, dev);
996 break;
997 case htons(ETH_P_IPV6):
998 ret = ip6gre_xmit_ipv6(skb, dev);
999 break;
1000 default:
1001 ret = ip6gre_xmit_other(skb, dev);
1002 break;
1003 }
1004
1005 if (ret < 0)
1006 goto tx_err;
1007
1008 return NETDEV_TX_OK;
1009
1010tx_err:
1011 stats->tx_errors++;
1012 stats->tx_dropped++;
1013 kfree_skb(skb);
1014 return NETDEV_TX_OK;
1015}
1016
/*
 * Apply the tunnel parameters to the net_device: device addresses, the
 * cached flowi template, point-to-point flag, precomputed GRE header
 * length (t->hlen) and, optionally, the MTU derived from the route to
 * the remote endpoint.
 */
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	/* Base overhead: outer IPv6 header + 4-byte GRE base header. */
	int addend = sizeof(struct ipv6hdr) + 4;

	/* Non-Ethernet tunnels use the IPv6 endpoints as link addresses. */
	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	/* Fixed tclass/flowlabel only when not inheriting from the payload. */
	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
			p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	/* Precalculate GRE options length */
	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (t->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (t->parms.o_flags&GRE_KEY)
			addend += 4;
		if (t->parms.o_flags&GRE_SEQ)
			addend += 4;
	}

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(dev_net(dev),
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (rt == NULL)
			return;

		/* Derive header space and MTU from the underlying device. */
		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len + addend;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - addend;
				/* Reserve room for the encap-limit option. */
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		dst_release(&rt->dst);
	}

	t->hlen = addend;
}
1089
/*
 * Copy new parameters into the tunnel, drop the cached dst (it may no
 * longer match the new endpoints) and re-apply the link configuration.
 * Always returns 0.
 */
static int ip6gre_tnl_change(struct ip6_tnl *t,
	const struct __ip6_tnl_parm *p, int set_mtu)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.i_key = p->i_key;
	t->parms.o_key = p->o_key;
	t->parms.i_flags = p->i_flags;
	t->parms.o_flags = p->o_flags;
	ip6_tnl_dst_reset(t);
	ip6gre_tnl_link_config(t, set_mtu);
	return 0;
}
1109
/*
 * Convert the userspace ioctl parameter block (ip6_tnl_parm2) into the
 * kernel-internal __ip6_tnl_parm representation.
 * NOTE(review): u->proto is deliberately not copied here — the protocol
 * is fixed to IPPROTO_GRE by ip6gre_tnl_parm_to_user(); confirm callers
 * never rely on a user-supplied proto.
 */
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
	const struct ip6_tnl_parm2 *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->i_key = u->i_key;
	p->o_key = u->o_key;
	p->i_flags = u->i_flags;
	p->o_flags = u->o_flags;
	memcpy(p->name, u->name, sizeof(u->name));
}
1126
/*
 * Convert the kernel-internal parameters back into the userspace ioctl
 * layout.  The protocol is always reported as IPPROTO_GRE.
 */
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
	const struct __ip6_tnl_parm *p)
{
	u->proto = IPPROTO_GRE;
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->i_key = p->i_key;
	u->o_key = p->o_key;
	u->i_flags = p->i_flags;
	u->o_flags = p->o_flags;
	memcpy(u->name, p->name, sizeof(u->name));
}
1144
/*
 * Legacy ioctl interface (SIOCGETTUNNEL / SIOCADDTUNNEL / SIOCCHGTUNNEL /
 * SIOCDELTUNNEL) mirroring the IPv4 GRE driver's semantics.  The fallback
 * device (ign->fb_tunnel_dev) acts as a control handle for operating on
 * arbitrary tunnels by parameter lookup.
 */
static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		/* On the fallback device, look up the tunnel named by the
		 * user-supplied parameters; otherwise report this device. */
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* GRE version and source-routing bits are unsupported. */
		err = -EINVAL;
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		/* Keys are only meaningful when the KEY flag is set. */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* Parameters already belong to another dev. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				/* Re-key this device: unlink, change, relink
				 * so the hash bucket matches the new parms. */
				t = netdev_priv(dev);

				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			/* Echo the effective parameters back to userspace. */
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (t == NULL)
				goto done;
			/* The fallback device itself may not be deleted. */
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1252
1253static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1254{
1255 struct ip6_tnl *tunnel = netdev_priv(dev);
1256 if (new_mtu < 68 ||
1257 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1258 return -EINVAL;
1259 dev->mtu = new_mtu;
1260 return 0;
1261}
1262
/*
 * header_ops->create: prebuild the outer IPv6 + GRE header in front of
 * the payload.  Returns the (positive) header length when the packet is
 * ready to go, or its negation when the destination is still unknown.
 */
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
	/* GRE flags/protocol words live directly after the IPv6 header. */
	__be16 *p = (__be16 *)(ipv6h+1);

	/* Version 6 plus the tunnel's cached flow label. */
	*(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 * Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}
1293
1294static int ip6gre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1295{
1296 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb_mac_header(skb);
1297 memcpy(haddr, &ipv6h->saddr, sizeof(struct in6_addr));
1298 return sizeof(struct in6_addr);
1299}
1300
/* Link-layer header operations for non-Ethernet (ARPHRD_IP6GRE) tunnels. */
static const struct header_ops ip6gre_header_ops = {
	.create	= ip6gre_header,
	.parse	= ip6gre_header_parse,
};
1305
/* Netdev operations for plain (layer-3) ip6gre tunnels. */
static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init		= ip6gre_tunnel_init,
	.ndo_uninit		= ip6gre_tunnel_uninit,
	.ndo_start_xmit		= ip6gre_tunnel_xmit,
	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
	.ndo_change_mtu		= ip6gre_tunnel_change_mtu,
	.ndo_get_stats64	= ip6gre_get_stats64,
};
1314
/* Device destructor: release per-cpu stats, then the device itself. */
static void ip6gre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
1320
/*
 * rtnl_link_ops->setup for layer-3 tunnels: install the netdev ops and
 * defaults (type, header space, MTU, NOARP) before registration.
 */
static void ip6gre_tunnel_setup(struct net_device *dev)
{
	struct ip6_tnl *t;

	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;
	/* Reserve IPv6 header + 4-byte GRE base header on top of LL space. */
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
	t = netdev_priv(dev);
	/* Leave room for the 8-byte encapsulation-limit option. */
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->flags |= IFF_NOARP;
	dev->iflink = 0;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
1340
/*
 * ndo_init for layer-3 tunnels: bind the private area to the device,
 * publish the endpoints as dev/broadcast addresses and allocate per-cpu
 * stats.  Returns 0 or -ENOMEM.
 */
static int ip6gre_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

	/* NBMA tunnel (no fixed remote): headers built per-destination. */
	if (ipv6_addr_any(&tunnel->parms.raddr))
		dev->header_ops = &ip6gre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
1362
/*
 * Initialise the per-namespace fallback device ("ip6gre0").  Only the
 * minimal state is set up; the extra dev_hold() keeps it alive for the
 * lifetime of the namespace.
 */
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	/* Base header length: outer IPv6 + 4-byte GRE header, no options. */
	tunnel->hlen		= sizeof(struct ipv6hdr) + 4;

	dev_hold(dev);
}
1374
1375
/* IPPROTO_GRE receive hooks registered with the IPv6 stack. */
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler     = ip6gre_rcv,
	.err_handler = ip6gre_err,
	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1381
1382static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
1383 struct list_head *head)
1384{
1385 int prio;
1386
1387 for (prio = 0; prio < 4; prio++) {
1388 int h;
1389 for (h = 0; h < HASH_SIZE; h++) {
1390 struct ip6_tnl *t;
1391
1392 t = rtnl_dereference(ign->tunnels[prio][h]);
1393
1394 while (t != NULL) {
1395 unregister_netdevice_queue(t->dev, head);
1396 t = rtnl_dereference(t->next);
1397 }
1398 }
1399 }
1400}
1401
/*
 * Per-namespace init: create and register the fallback "ip6gre0" device
 * and hash it into the wildcard bucket.  Returns 0 or a negative errno,
 * freeing the device on registration failure.
 */
static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					   ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	/* Wildcard bucket: catches traffic with no better tunnel match. */
	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ip6gre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
1431
1432static void __net_exit ip6gre_exit_net(struct net *net)
1433{
1434 struct ip6gre_net *ign;
1435 LIST_HEAD(list);
1436
1437 ign = net_generic(net, ip6gre_net_id);
1438 rtnl_lock();
1439 ip6gre_destroy_tunnels(ign, &list);
1440 unregister_netdevice_many(&list);
1441 rtnl_unlock();
1442}
1443
/* Per-network-namespace lifecycle hooks and private-state sizing. */
static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit = ip6gre_exit_net,
	.id   = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
1450
1451static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1452{
1453 __be16 flags;
1454
1455 if (!data)
1456 return 0;
1457
1458 flags = 0;
1459 if (data[IFLA_GRE_IFLAGS])
1460 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1461 if (data[IFLA_GRE_OFLAGS])
1462 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1463 if (flags & (GRE_VERSION|GRE_ROUTING))
1464 return -EINVAL;
1465
1466 return 0;
1467}
1468
1469static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1470{
1471 struct in6_addr daddr;
1472
1473 if (tb[IFLA_ADDRESS]) {
1474 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1475 return -EINVAL;
1476 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1477 return -EADDRNOTAVAIL;
1478 }
1479
1480 if (!data)
1481 goto out;
1482
1483 if (data[IFLA_GRE_REMOTE]) {
1484 nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
1485 if (ipv6_addr_any(&daddr))
1486 return -EINVAL;
1487 }
1488
1489out:
1490 return ip6gre_tunnel_validate(tb, data);
1491}
1492
1493
/*
 * Translate validated netlink attributes into tunnel parameters.  The
 * parameter block is zeroed first, so absent attributes read as zero.
 */
static void ip6gre_netlink_parms(struct nlattr *data[],
	struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));

	if (data[IFLA_GRE_REMOTE])
		nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));

	if (data[IFLA_GRE_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

	if (data[IFLA_GRE_FLOWINFO])
		parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);

	if (data[IFLA_GRE_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
}
1535
1536static int ip6gre_tap_init(struct net_device *dev)
1537{
1538 struct ip6_tnl *tunnel;
1539
1540 tunnel = netdev_priv(dev);
1541
1542 tunnel->dev = dev;
1543 strcpy(tunnel->parms.name, dev->name);
1544
1545 ip6gre_tnl_link_config(tunnel, 1);
1546
1547 dev->tstats = alloc_percpu(struct pcpu_tstats);
1548 if (!dev->tstats)
1549 return -ENOMEM;
1550
1551 return 0;
1552}
1553
/* Netdev operations for Ethernet-over-GRE (ip6gretap) devices. */
static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init = ip6gre_tap_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
	.ndo_get_stats64 = ip6gre_get_stats64,
};
1563
/*
 * rtnl_link_ops->setup for ip6gretap: start from a standard Ethernet
 * device, then install the tunnel operations.
 */
static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->iflink = 0;
	dev->features |= NETIF_F_NETNS_LOCAL;
}
1575
/*
 * rtnl_link_ops->newlink: create a tunnel from netlink attributes.
 * Fails with -EEXIST if a tunnel with identical parameters already
 * exists.  On success the device is registered, held and hashed in.
 */
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
	struct nlattr *tb[], struct nlattr *data[])
{
	struct ip6_tnl *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &nt->parms);

	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);

out:
	return err;
}
1610
/*
 * rtnl_link_ops->changelink: apply new parameters to an existing tunnel.
 * The fallback device may not be changed; parameters that match a
 * different device yield -EEXIST.  Rehashes the tunnel when its keying
 * parameters change.
 */
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip6_tnl *t, *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &p);

	t = ip6gre_tunnel_locate(net, &p, 0);

	if (t) {
		/* Parameters already in use — only OK if they are ours. */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		/* Unlink, update, relink so the hash bucket stays correct. */
		ip6gre_tunnel_unlink(ign, t);
		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
		ip6gre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	return 0;
}
1641
1642static size_t ip6gre_get_size(const struct net_device *dev)
1643{
1644 return
1645 /* IFLA_GRE_LINK */
1646 nla_total_size(4) +
1647 /* IFLA_GRE_IFLAGS */
1648 nla_total_size(2) +
1649 /* IFLA_GRE_OFLAGS */
1650 nla_total_size(2) +
1651 /* IFLA_GRE_IKEY */
1652 nla_total_size(4) +
1653 /* IFLA_GRE_OKEY */
1654 nla_total_size(4) +
1655 /* IFLA_GRE_LOCAL */
1656 nla_total_size(4) +
1657 /* IFLA_GRE_REMOTE */
1658 nla_total_size(4) +
1659 /* IFLA_GRE_TTL */
1660 nla_total_size(1) +
1661 /* IFLA_GRE_TOS */
1662 nla_total_size(1) +
1663 /* IFLA_GRE_ENCAP_LIMIT */
1664 nla_total_size(1) +
1665 /* IFLA_GRE_FLOWINFO */
1666 nla_total_size(4) +
1667 /* IFLA_GRE_FLAGS */
1668 nla_total_size(4) +
1669 0;
1670}
1671
1672static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1673{
1674 struct ip6_tnl *t = netdev_priv(dev);
1675 struct __ip6_tnl_parm *p = &t->parms;
1676
1677 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1678 nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1679 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1680 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1681 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1682 nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) ||
1683 nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) ||
1684 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
1685 /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
1686 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
1687 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
1688 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
1689 goto nla_put_failure;
1690 return 0;
1691
1692nla_put_failure:
1693 return -EMSGSIZE;
1694}
1695
/* Netlink attribute policy shared by the ip6gre and ip6gretap link ops. */
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]        = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
	[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL]         = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
};
1709
/* rtnetlink registration for layer-3 "ip6gre" tunnels. */
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind		= "ip6gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tunnel_setup,
	.validate	= ip6gre_tunnel_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
};
1722
/* rtnetlink registration for Ethernet-over-GRE "ip6gretap" devices. */
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind		= "ip6gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ip6gre_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6gre_tap_setup,
	.validate	= ip6gre_tap_validate,
	.newlink	= ip6gre_newlink,
	.changelink	= ip6gre_changelink,
	.get_size	= ip6gre_get_size,
	.fill_info	= ip6gre_fill_info,
};
1735
1736/*
1737 * And now the modules code and kernel interface.
1738 */
1739
/*
 * Module init: register in dependency order (pernet state, then the
 * IPPROTO_GRE handler, then both rtnetlink ops).  Each failure path
 * unwinds everything registered before it, in reverse order.
 */
static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}
1775
/* Module exit: unregister in exact reverse order of ip6gre_init(). */
static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}
1783
/* Module metadata and aliases for automatic loading via rtnetlink/netdev. */
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_NETDEV("ip6gre0");
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9a1d5fe6aef8..33d2a0e6712d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,7 +126,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
126 * Locking : hash tables are protected by RCU and RTNL 126 * Locking : hash tables are protected by RCU and RTNL
127 */ 127 */
128 128
129static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) 129struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
130{ 130{
131 struct dst_entry *dst = t->dst_cache; 131 struct dst_entry *dst = t->dst_cache;
132 132
@@ -139,20 +139,23 @@ static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
139 139
140 return dst; 140 return dst;
141} 141}
142EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
142 143
143static inline void ip6_tnl_dst_reset(struct ip6_tnl *t) 144void ip6_tnl_dst_reset(struct ip6_tnl *t)
144{ 145{
145 dst_release(t->dst_cache); 146 dst_release(t->dst_cache);
146 t->dst_cache = NULL; 147 t->dst_cache = NULL;
147} 148}
149EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
148 150
149static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) 151void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
150{ 152{
151 struct rt6_info *rt = (struct rt6_info *) dst; 153 struct rt6_info *rt = (struct rt6_info *) dst;
152 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 154 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
153 dst_release(t->dst_cache); 155 dst_release(t->dst_cache);
154 t->dst_cache = dst; 156 t->dst_cache = dst;
155} 157}
158EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
156 159
157/** 160/**
158 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses 161 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -200,7 +203,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
200 **/ 203 **/
201 204
202static struct ip6_tnl __rcu ** 205static struct ip6_tnl __rcu **
203ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p) 206ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
204{ 207{
205 const struct in6_addr *remote = &p->raddr; 208 const struct in6_addr *remote = &p->raddr;
206 const struct in6_addr *local = &p->laddr; 209 const struct in6_addr *local = &p->laddr;
@@ -267,7 +270,7 @@ static void ip6_dev_free(struct net_device *dev)
267 * created tunnel or NULL 270 * created tunnel or NULL
268 **/ 271 **/
269 272
270static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p) 273static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
271{ 274{
272 struct net_device *dev; 275 struct net_device *dev;
273 struct ip6_tnl *t; 276 struct ip6_tnl *t;
@@ -322,7 +325,7 @@ failed:
322 **/ 325 **/
323 326
324static struct ip6_tnl *ip6_tnl_locate(struct net *net, 327static struct ip6_tnl *ip6_tnl_locate(struct net *net,
325 struct ip6_tnl_parm *p, int create) 328 struct __ip6_tnl_parm *p, int create)
326{ 329{
327 const struct in6_addr *remote = &p->raddr; 330 const struct in6_addr *remote = &p->raddr;
328 const struct in6_addr *local = &p->laddr; 331 const struct in6_addr *local = &p->laddr;
@@ -374,8 +377,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
374 * else index to encapsulation limit 377 * else index to encapsulation limit
375 **/ 378 **/
376 379
377static __u16 380__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
378parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
379{ 381{
380 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; 382 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
381 __u8 nexthdr = ipv6h->nexthdr; 383 __u8 nexthdr = ipv6h->nexthdr;
@@ -425,6 +427,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
425 } 427 }
426 return 0; 428 return 0;
427} 429}
430EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
428 431
429/** 432/**
430 * ip6_tnl_err - tunnel error handler 433 * ip6_tnl_err - tunnel error handler
@@ -480,7 +483,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
480 case ICMPV6_PARAMPROB: 483 case ICMPV6_PARAMPROB:
481 teli = 0; 484 teli = 0;
482 if ((*code) == ICMPV6_HDR_FIELD) 485 if ((*code) == ICMPV6_HDR_FIELD)
483 teli = parse_tlv_tnl_enc_lim(skb, skb->data); 486 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
484 487
485 if (teli && teli == *info - 2) { 488 if (teli && teli == *info - 2) {
486 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 489 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -693,11 +696,11 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
693 IP6_ECN_set_ce(ipv6_hdr(skb)); 696 IP6_ECN_set_ce(ipv6_hdr(skb));
694} 697}
695 698
696static __u32 ip6_tnl_get_cap(struct ip6_tnl *t, 699__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
697 const struct in6_addr *laddr, 700 const struct in6_addr *laddr,
698 const struct in6_addr *raddr) 701 const struct in6_addr *raddr)
699{ 702{
700 struct ip6_tnl_parm *p = &t->parms; 703 struct __ip6_tnl_parm *p = &t->parms;
701 int ltype = ipv6_addr_type(laddr); 704 int ltype = ipv6_addr_type(laddr);
702 int rtype = ipv6_addr_type(raddr); 705 int rtype = ipv6_addr_type(raddr);
703 __u32 flags = 0; 706 __u32 flags = 0;
@@ -715,13 +718,14 @@ static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
715 } 718 }
716 return flags; 719 return flags;
717} 720}
721EXPORT_SYMBOL(ip6_tnl_get_cap);
718 722
719/* called with rcu_read_lock() */ 723/* called with rcu_read_lock() */
720static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t, 724int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
721 const struct in6_addr *laddr, 725 const struct in6_addr *laddr,
722 const struct in6_addr *raddr) 726 const struct in6_addr *raddr)
723{ 727{
724 struct ip6_tnl_parm *p = &t->parms; 728 struct __ip6_tnl_parm *p = &t->parms;
725 int ret = 0; 729 int ret = 0;
726 struct net *net = dev_net(t->dev); 730 struct net *net = dev_net(t->dev);
727 731
@@ -740,6 +744,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
740 } 744 }
741 return ret; 745 return ret;
742} 746}
747EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
743 748
744/** 749/**
745 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally 750 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -859,9 +864,9 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
859 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 864 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
860} 865}
861 866
862static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) 867int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
863{ 868{
864 struct ip6_tnl_parm *p = &t->parms; 869 struct __ip6_tnl_parm *p = &t->parms;
865 int ret = 0; 870 int ret = 0;
866 struct net *net = dev_net(t->dev); 871 struct net *net = dev_net(t->dev);
867 872
@@ -885,6 +890,8 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
885 } 890 }
886 return ret; 891 return ret;
887} 892}
893EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
894
888/** 895/**
889 * ip6_tnl_xmit2 - encapsulate packet and send 896 * ip6_tnl_xmit2 - encapsulate packet and send
890 * @skb: the outgoing socket buffer 897 * @skb: the outgoing socket buffer
@@ -1085,7 +1092,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1085 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) 1092 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
1086 return -1; 1093 return -1;
1087 1094
1088 offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb)); 1095 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1089 if (offset > 0) { 1096 if (offset > 0) {
1090 struct ipv6_tlv_tnl_enc_lim *tel; 1097 struct ipv6_tlv_tnl_enc_lim *tel;
1091 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 1098 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -1152,7 +1159,7 @@ tx_err:
1152static void ip6_tnl_link_config(struct ip6_tnl *t) 1159static void ip6_tnl_link_config(struct ip6_tnl *t)
1153{ 1160{
1154 struct net_device *dev = t->dev; 1161 struct net_device *dev = t->dev;
1155 struct ip6_tnl_parm *p = &t->parms; 1162 struct __ip6_tnl_parm *p = &t->parms;
1156 struct flowi6 *fl6 = &t->fl.u.ip6; 1163 struct flowi6 *fl6 = &t->fl.u.ip6;
1157 1164
1158 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); 1165 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1215,7 +1222,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1215 **/ 1222 **/
1216 1223
1217static int 1224static int
1218ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) 1225ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1219{ 1226{
1220 t->parms.laddr = p->laddr; 1227 t->parms.laddr = p->laddr;
1221 t->parms.raddr = p->raddr; 1228 t->parms.raddr = p->raddr;
@@ -1230,6 +1237,34 @@ ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
1230 return 0; 1237 return 0;
1231} 1238}
1232 1239
1240static void
1241ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1242{
1243 p->laddr = u->laddr;
1244 p->raddr = u->raddr;
1245 p->flags = u->flags;
1246 p->hop_limit = u->hop_limit;
1247 p->encap_limit = u->encap_limit;
1248 p->flowinfo = u->flowinfo;
1249 p->link = u->link;
1250 p->proto = u->proto;
1251 memcpy(p->name, u->name, sizeof(u->name));
1252}
1253
1254static void
1255ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1256{
1257 u->laddr = p->laddr;
1258 u->raddr = p->raddr;
1259 u->flags = p->flags;
1260 u->hop_limit = p->hop_limit;
1261 u->encap_limit = p->encap_limit;
1262 u->flowinfo = p->flowinfo;
1263 u->link = p->link;
1264 u->proto = p->proto;
1265 memcpy(u->name, p->name, sizeof(u->name));
1266}
1267
1233/** 1268/**
1234 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace 1269 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1235 * @dev: virtual device associated with tunnel 1270 * @dev: virtual device associated with tunnel
@@ -1263,6 +1298,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1263{ 1298{
1264 int err = 0; 1299 int err = 0;
1265 struct ip6_tnl_parm p; 1300 struct ip6_tnl_parm p;
1301 struct __ip6_tnl_parm p1;
1266 struct ip6_tnl *t = NULL; 1302 struct ip6_tnl *t = NULL;
1267 struct net *net = dev_net(dev); 1303 struct net *net = dev_net(dev);
1268 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1304 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1274,11 +1310,12 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1274 err = -EFAULT; 1310 err = -EFAULT;
1275 break; 1311 break;
1276 } 1312 }
1277 t = ip6_tnl_locate(net, &p, 0); 1313 ip6_tnl_parm_from_user(&p1, &p);
1314 t = ip6_tnl_locate(net, &p1, 0);
1278 } 1315 }
1279 if (t == NULL) 1316 if (t == NULL)
1280 t = netdev_priv(dev); 1317 t = netdev_priv(dev);
1281 memcpy(&p, &t->parms, sizeof (p)); 1318 ip6_tnl_parm_to_user(&p, &t->parms);
1282 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { 1319 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
1283 err = -EFAULT; 1320 err = -EFAULT;
1284 } 1321 }
@@ -1295,7 +1332,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1295 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && 1332 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1296 p.proto != 0) 1333 p.proto != 0)
1297 break; 1334 break;
1298 t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL); 1335 ip6_tnl_parm_from_user(&p1, &p);
1336 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1299 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { 1337 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
1300 if (t != NULL) { 1338 if (t != NULL) {
1301 if (t->dev != dev) { 1339 if (t->dev != dev) {
@@ -1307,13 +1345,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1307 1345
1308 ip6_tnl_unlink(ip6n, t); 1346 ip6_tnl_unlink(ip6n, t);
1309 synchronize_net(); 1347 synchronize_net();
1310 err = ip6_tnl_change(t, &p); 1348 err = ip6_tnl_change(t, &p1);
1311 ip6_tnl_link(ip6n, t); 1349 ip6_tnl_link(ip6n, t);
1312 netdev_state_change(dev); 1350 netdev_state_change(dev);
1313 } 1351 }
1314 if (t) { 1352 if (t) {
1315 err = 0; 1353 err = 0;
1316 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p))) 1354 ip6_tnl_parm_to_user(&p, &t->parms);
1355 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1317 err = -EFAULT; 1356 err = -EFAULT;
1318 1357
1319 } else 1358 } else
@@ -1329,7 +1368,9 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1329 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) 1368 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1330 break; 1369 break;
1331 err = -ENOENT; 1370 err = -ENOENT;
1332 if ((t = ip6_tnl_locate(net, &p, 0)) == NULL) 1371 ip6_tnl_parm_from_user(&p1, &p);
1372 t = ip6_tnl_locate(net, &p1, 0);
1373 if (t == NULL)
1333 break; 1374 break;
1334 err = -EPERM; 1375 err = -EPERM;
1335 if (t->dev == ip6n->fb_tnl_dev) 1376 if (t->dev == ip6n->fb_tnl_dev)