author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/ipv6/ip6_tunnel.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'net/ipv6/ip6_tunnel.c')
-rw-r--r--   net/ipv6/ip6_tunnel.c | 293
1 file changed, 183 insertions, 110 deletions
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0fd027f3f47e..36c2842a86b2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,8 +57,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
-
-#define IPV6_TLV_TEL_DST_SIZE 8
+MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
@@ -75,7 +74,7 @@ MODULE_LICENSE("GPL");
 	     (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
 	    (HASH_SIZE - 1))
 
-static void ip6_tnl_dev_init(struct net_device *dev);
+static int ip6_tnl_dev_init(struct net_device *dev);
 static void ip6_tnl_dev_setup(struct net_device *dev);
 
 static int ip6_tnl_net_id __read_mostly;
@@ -83,15 +82,42 @@ struct ip6_tnl_net {
 	/* the IPv6 tunnel fallback device */
 	struct net_device *fb_tnl_dev;
 	/* lists for storing tunnels in use */
-	struct ip6_tnl *tnls_r_l[HASH_SIZE];
-	struct ip6_tnl *tnls_wc[1];
-	struct ip6_tnl **tnls[2];
+	struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+	struct ip6_tnl __rcu *tnls_wc[1];
+	struct ip6_tnl __rcu **tnls[2];
+};
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
 };
 
+static struct net_device_stats *ip6_get_stats(struct net_device *dev)
+{
+	struct pcpu_tstats sum = { 0 };
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+		sum.rx_packets += tstats->rx_packets;
+		sum.rx_bytes += tstats->rx_bytes;
+		sum.tx_packets += tstats->tx_packets;
+		sum.tx_bytes += tstats->tx_bytes;
+	}
+	dev->stats.rx_packets = sum.rx_packets;
+	dev->stats.rx_bytes = sum.rx_bytes;
+	dev->stats.tx_packets = sum.tx_packets;
+	dev->stats.tx_bytes = sum.tx_bytes;
+	return &dev->stats;
+}
+
 /*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
  */
-static DEFINE_SPINLOCK(ip6_tnl_lock);
 
 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
@@ -136,10 +162,10 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
 static struct ip6_tnl *
-ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
+ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
 {
-	unsigned h0 = HASH(remote);
-	unsigned h1 = HASH(local);
+	unsigned int h0 = HASH(remote);
+	unsigned int h1 = HASH(local);
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
@@ -167,11 +193,11 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
  * Return: head of IPv6 tunnel list
  **/
 
-static struct ip6_tnl **
-ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
+static struct ip6_tnl __rcu **
+ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
 {
-	struct in6_addr *remote = &p->raddr;
-	struct in6_addr *local = &p->laddr;
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
 	unsigned h = 0;
 	int prio = 0;
 
@@ -190,12 +216,10 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
 static void
 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
-	struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
+	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
-	spin_lock_bh(&ip6_tnl_lock);
-	t->next = *tp;
+	rcu_assign_pointer(t->next , rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
-	spin_unlock_bh(&ip6_tnl_lock);
 }
 
 /**
@@ -206,18 +230,25 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 static void
 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
-	struct ip6_tnl **tp;
-
-	for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
-		if (t == *tp) {
-			spin_lock_bh(&ip6_tnl_lock);
-			*tp = t->next;
-			spin_unlock_bh(&ip6_tnl_lock);
+	struct ip6_tnl __rcu **tp;
+	struct ip6_tnl *iter;
+
+	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
 			break;
 		}
 	}
 }
 
+static void ip6_dev_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+	free_netdev(dev);
+}
+
 /**
  * ip6_tnl_create() - create a new tunnel
  * @p: tunnel parameters
@@ -249,14 +280,11 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
 
 	dev_net_set(dev, net);
 
-	if (strchr(name, '%')) {
-		if (dev_alloc_name(dev, name) < 0)
-			goto failed_free;
-	}
-
 	t = netdev_priv(dev);
 	t->parms = *p;
-	ip6_tnl_dev_init(dev);
+	err = ip6_tnl_dev_init(dev);
+	if (err < 0)
+		goto failed_free;
 
 	if ((err = register_netdevice(dev)) < 0)
 		goto failed_free;
@@ -266,7 +294,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
 	return t;
 
 failed_free:
-	free_netdev(dev);
+	ip6_dev_free(dev);
 failed:
 	return NULL;
 }
@@ -288,12 +316,15 @@ failed:
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
 		struct ip6_tnl_parm *p, int create)
 {
-	struct in6_addr *remote = &p->raddr;
-	struct in6_addr *local = &p->laddr;
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
+	struct ip6_tnl __rcu **tp;
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
-	for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) {
+	for (tp = ip6_tnl_bucket(ip6n, p);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr))
 			return t;
@@ -318,13 +349,10 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
-	if (dev == ip6n->fb_tnl_dev) {
-		spin_lock_bh(&ip6_tnl_lock);
-		ip6n->tnls_wc[0] = NULL;
-		spin_unlock_bh(&ip6_tnl_lock);
-	} else {
+	if (dev == ip6n->fb_tnl_dev)
+		rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+	else
 		ip6_tnl_unlink(ip6n, t);
-	}
 	ip6_tnl_dst_reset(t);
 	dev_put(dev);
 }
@@ -341,7 +369,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 static __u16
 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
 {
-	struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw;
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
 	__u8 nexthdr = ipv6h->nexthdr;
 	__u16 off = sizeof (*ipv6h);
 
@@ -402,7 +430,7 @@ static int
 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
 {
-	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
 	struct ip6_tnl *t;
 	int rel_msg = 0;
 	u8 rel_type = ICMPV6_DEST_UNREACH;
@@ -502,9 +530,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	__u32 rel_info = ntohl(info);
 	int err;
 	struct sk_buff *skb2;
-	struct iphdr *eiph;
-	struct flowi fl;
+	const struct iphdr *eiph;
 	struct rtable *rt;
+	struct flowi4 fl4;
 
 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
 			  &rel_msg, &rel_info, offset);
@@ -545,11 +573,11 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	eiph = ip_hdr(skb2);
 
 	/* Try to guess incoming interface */
-	memset(&fl, 0, sizeof(fl));
-	fl.fl4_dst = eiph->saddr;
-	fl.fl4_tos = RT_TOS(eiph->tos);
-	fl.proto = IPPROTO_IPIP;
-	if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
+	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
+				   eiph->saddr, 0,
+				   0, 0,
+				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+	if (IS_ERR(rt))
 		goto out;
 
 	skb2->dev = rt->dst.dev;
@@ -558,15 +586,18 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
 		rt = NULL;
-		fl.fl4_dst = eiph->daddr;
-		fl.fl4_src = eiph->saddr;
-		fl.fl4_tos = eiph->tos;
-		if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
+		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
+					   eiph->daddr, eiph->saddr,
+					   0, 0,
+					   IPPROTO_IPIP,
+					   RT_TOS(eiph->tos), 0);
+		if (IS_ERR(rt) ||
 		    rt->dst.dev->type != ARPHRD_TUNNEL) {
-			ip_rt_put(rt);
+			if (!IS_ERR(rt))
+				ip_rt_put(rt);
 			goto out;
 		}
-		skb_dst_set(skb2, (struct dst_entry *)rt);
+		skb_dst_set(skb2, &rt->dst);
 	} else {
 		ip_rt_put(rt);
 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
@@ -634,8 +665,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	return 0;
 }
 
-static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
-					struct ipv6hdr *ipv6h,
+static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+					const struct ipv6hdr *ipv6h,
 					struct sk_buff *skb)
 {
 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
@@ -647,8 +678,8 @@ static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
 		IP_ECN_set_ce(ip_hdr(skb));
 }
 
-static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
-					struct ipv6hdr *ipv6h,
+static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+					const struct ipv6hdr *ipv6h,
 					struct sk_buff *skb)
 {
 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
@@ -691,17 +722,19 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
 
 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		       __u8 ipproto,
-		       void (*dscp_ecn_decapsulate)(struct ip6_tnl *t,
-						    struct ipv6hdr *ipv6h,
+		       void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+						    const struct ipv6hdr *ipv6h,
 						    struct sk_buff *skb))
 {
 	struct ip6_tnl *t;
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
 	rcu_read_lock();
 
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
 				&ipv6h->daddr)) != NULL) {
+		struct pcpu_tstats *tstats;
+
 		if (t->parms.proto != ipproto && t->parms.proto != 0) {
 			rcu_read_unlock();
 			goto discard;
@@ -724,10 +757,16 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		skb->pkt_type = PACKET_HOST;
 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
-		skb_tunnel_rx(skb, t->dev);
+		tstats = this_cpu_ptr(t->dev->tstats);
+		tstats->rx_packets++;
+		tstats->rx_bytes += skb->len;
+
+		__skb_tunnel_rx(skb, t->dev);
 
 		dscp_ecn_decapsulate(t, ipv6h, skb);
+
 		netif_rx(skb);
+
 		rcu_read_unlock();
 		return 0;
 	}
@@ -785,7 +824,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
 **/
 
 static inline int
-ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
+ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
 {
 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
@@ -841,7 +880,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 static int ip6_tnl_xmit2(struct sk_buff *skb,
 			 struct net_device *dev,
 			 __u8 dsfield,
-			 struct flowi *fl,
+			 struct flowi6 *fl6,
 			 int encap_limit,
 			 __u32 *pmtu)
 {
@@ -861,10 +900,16 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	if ((dst = ip6_tnl_dst_check(t)) != NULL)
 		dst_hold(dst);
 	else {
-		dst = ip6_route_output(net, NULL, fl);
+		dst = ip6_route_output(net, NULL, fl6);
 
-		if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0)
+		if (dst->error)
+			goto tx_err_link_failure;
+		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
+		if (IS_ERR(dst)) {
+			err = PTR_ERR(dst);
+			dst = NULL;
 			goto tx_err_link_failure;
+		}
 	}
 
 	tdev = dst->dev;
@@ -914,7 +959,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 
 	skb->transport_header = skb->network_header;
 
-	proto = fl->proto;
+	proto = fl6->flowi6_proto;
 	if (encap_limit >= 0) {
 		init_tel_txopt(&opt, encap_limit);
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
@@ -922,20 +967,22 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
-	*(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
+	*(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000);
 	dsfield = INET_ECN_encapsulate(0, dsfield);
 	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = proto;
-	ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
-	ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
+	ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
+	ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
 	nf_reset(skb);
 	pkt_len = skb->len;
 	err = ip6_local_out(skb);
 
 	if (net_xmit_eval(err) == 0) {
-		stats->tx_bytes += pkt_len;
-		stats->tx_packets++;
+		struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
+
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
 	} else {
 		stats->tx_errors++;
 		stats->tx_aborted_errors++;
@@ -954,9 +1001,9 @@ static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct iphdr *iph = ip_hdr(skb);
+	const struct iphdr *iph = ip_hdr(skb);
 	int encap_limit = -1;
-	struct flowi fl;
+	struct flowi6 fl6;
 	__u8 dsfield;
 	__u32 mtu;
 	int err;
@@ -968,16 +1015,16 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		encap_limit = t->parms.encap_limit;
 
-	memcpy(&fl, &t->fl, sizeof (fl));
-	fl.proto = IPPROTO_IPIP;
+	memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
+	fl6.flowi6_proto = IPPROTO_IPIP;
 
 	dsfield = ipv4_get_dsfield(iph);
 
 	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-		fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+		fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
 					 & IPV6_TCLASS_MASK;
 
-	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
 		/* XXX: send ICMP error even if DF is not set. */
 		if (err == -EMSGSIZE)
@@ -996,7 +1043,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	int encap_limit = -1;
 	__u16 offset;
-	struct flowi fl;
+	struct flowi6 fl6;
 	__u8 dsfield;
 	__u32 mtu;
 	int err;
@@ -1018,16 +1065,16 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		encap_limit = t->parms.encap_limit;
 
-	memcpy(&fl, &t->fl, sizeof (fl));
-	fl.proto = IPPROTO_IPV6;
+	memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
+	fl6.flowi6_proto = IPPROTO_IPV6;
 
 	dsfield = ipv6_get_dsfield(ipv6h);
 	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
 	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
-		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
-	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
+	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
 	if (err != 0) {
 		if (err == -EMSGSIZE)
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1090,21 +1137,21 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
 	struct ip6_tnl_parm *p = &t->parms;
-	struct flowi *fl = &t->fl;
+	struct flowi6 *fl6 = &t->fl.u.ip6;
 
 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
 
 	/* Set up flowi template */
-	ipv6_addr_copy(&fl->fl6_src, &p->laddr);
-	ipv6_addr_copy(&fl->fl6_dst, &p->raddr);
-	fl->oif = p->link;
-	fl->fl6_flowlabel = 0;
+	ipv6_addr_copy(&fl6->saddr, &p->laddr);
+	ipv6_addr_copy(&fl6->daddr, &p->raddr);
+	fl6->flowi6_oif = p->link;
+	fl6->flowlabel = 0;
 
 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
-		fl->fl6_flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
+		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
-		fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
+		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
 
 	ip6_tnl_set_cap(t);
 
@@ -1131,6 +1178,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 					      sizeof (struct ipv6hdr);
 
 		dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+			dev->mtu-=8;
 
 		if (dev->mtu < IPV6_MIN_MTU)
 			dev->mtu = IPV6_MIN_MTU;
@@ -1240,6 +1289,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 			t = netdev_priv(dev);
 
 			ip6_tnl_unlink(ip6n, t);
+			synchronize_net();
 			err = ip6_tnl_change(t, &p);
 			ip6_tnl_link(ip6n, t);
 			netdev_state_change(dev);
@@ -1300,12 +1350,14 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
 	.ndo_uninit = ip6_tnl_dev_uninit,
 	.ndo_start_xmit = ip6_tnl_xmit,
 	.ndo_do_ioctl = ip6_tnl_ioctl,
 	.ndo_change_mtu = ip6_tnl_change_mtu,
+	.ndo_get_stats = ip6_get_stats,
 };
 
+
 /**
  * ip6_tnl_dev_setup - setup virtual tunnel device
  * @dev: virtual device associated with tunnel
@@ -1316,15 +1368,21 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
+	struct ip6_tnl *t;
+
 	dev->netdev_ops = &ip6_tnl_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->destructor = ip6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
 	dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
+	t = netdev_priv(dev);
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		dev->mtu-=8;
 	dev->flags |= IFF_NOARP;
 	dev->addr_len = sizeof(struct in6_addr);
 	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
 
@@ -1333,12 +1391,17 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
  * @dev: virtual device associated with tunnel
  **/
 
-static inline void
+static inline int
 ip6_tnl_dev_init_gen(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
+
 	t->dev = dev;
 	strcpy(t->parms.name, dev->name);
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+	return 0;
 }
 
 /**
@@ -1346,11 +1409,15 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
  * @dev: virtual device associated with tunnel
  **/
 
-static void ip6_tnl_dev_init(struct net_device *dev)
+static int ip6_tnl_dev_init(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	ip6_tnl_dev_init_gen(dev);
+	int err = ip6_tnl_dev_init_gen(dev);
+
+	if (err)
+		return err;
 	ip6_tnl_link_config(t);
+	return 0;
 }
 
 /**
@@ -1360,25 +1427,29 @@ static void ip6_tnl_dev_init(struct net_device *dev)
  * Return: 0
  **/
 
-static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = dev_net(dev);
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+	int err = ip6_tnl_dev_init_gen(dev);
+
+	if (err)
+		return err;
 
-	ip6_tnl_dev_init_gen(dev);
 	t->parms.proto = IPPROTO_IPV6;
 	dev_hold(dev);
-	ip6n->tnls_wc[0] = t;
+	rcu_assign_pointer(ip6n->tnls_wc[0], t);
+	return 0;
 }
 
-static struct xfrm6_tunnel ip4ip6_handler = {
+static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
 	.handler = ip4ip6_rcv,
 	.err_handler = ip4ip6_err,
 	.priority = 1,
 };
 
-static struct xfrm6_tunnel ip6ip6_handler = {
+static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
 	.handler = ip6ip6_rcv,
 	.err_handler = ip6ip6_err,
 	.priority = 1,
@@ -1391,14 +1462,14 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
 	LIST_HEAD(list);
 
 	for (h = 0; h < HASH_SIZE; h++) {
-		t = ip6n->tnls_r_l[h];
+		t = rtnl_dereference(ip6n->tnls_r_l[h]);
 		while (t != NULL) {
 			unregister_netdevice_queue(t->dev, &list);
-			t = t->next;
+			t = rtnl_dereference(t->next);
 		}
 	}
 
-	t = ip6n->tnls_wc[0];
+	t = rtnl_dereference(ip6n->tnls_wc[0]);
 	unregister_netdevice_queue(t->dev, &list);
 	unregister_netdevice_many(&list);
 }
@@ -1419,7 +1490,9 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 		goto err_alloc_dev;
 	dev_net_set(ip6n->fb_tnl_dev, net);
 
-	ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+	if (err < 0)
+		goto err_register;
 
 	err = register_netdev(ip6n->fb_tnl_dev);
 	if (err < 0)
@@ -1427,7 +1500,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 	return 0;
 
 err_register:
-	free_netdev(ip6n->fb_tnl_dev);
+	ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
 	return err;
 }