Diffstat (limited to 'net/ipv4/ipip.c')
 -rw-r--r--  net/ipv4/ipip.c | 229
 1 file changed, 142 insertions(+), 87 deletions(-)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec036731a70b..378b20b7ca6e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -122,31 +122,59 @@
 
 static int ipip_net_id __read_mostly;
 struct ipip_net {
-	struct ip_tunnel *tunnels_r_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_r[HASH_SIZE];
-	struct ip_tunnel *tunnels_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_wc[1];
-	struct ip_tunnel **tunnels[4];
+	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_wc[1];
+	struct ip_tunnel __rcu **tunnels[4];
 
 	struct net_device *fb_tunnel_dev;
 };
 
-static void ipip_tunnel_init(struct net_device *dev);
+static int ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);
+static void ipip_dev_free(struct net_device *dev);
 
 /*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
  */
-static DEFINE_SPINLOCK(ipip_lock);
 
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+{
+	struct pcpu_tstats sum = { 0 };
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+		sum.rx_packets += tstats->rx_packets;
+		sum.rx_bytes += tstats->rx_bytes;
+		sum.tx_packets += tstats->tx_packets;
+		sum.tx_bytes += tstats->tx_bytes;
+	}
+	dev->stats.rx_packets = sum.rx_packets;
+	dev->stats.rx_bytes = sum.rx_bytes;
+	dev->stats.tx_packets = sum.tx_packets;
+	dev->stats.tx_bytes = sum.tx_bytes;
+	return &dev->stats;
+}
+
 static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 					     __be32 remote, __be32 local)
 {
-	unsigned h0 = HASH(remote);
-	unsigned h1 = HASH(local);
+	unsigned int h0 = HASH(remote);
+	unsigned int h1 = HASH(local);
 	struct ip_tunnel *t;
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
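The hunk above moves the frequently written rx/tx counters off the shared netdev counters and onto per-CPU copies that are folded together only when statistics are read. A condensed sketch of that pattern follows; every "my_" identifier and the two-field counter struct are illustrative, not ipip.c symbols, and only calls that appear in this diff (alloc_percpu(), this_cpu_ptr(), per_cpu_ptr(), for_each_possible_cpu(), free_percpu()) are assumed:

#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Illustrative per-CPU counter pattern, condensed from this diff. */
struct my_tstats {
	unsigned long rx_packets;
	unsigned long rx_bytes;
};

struct my_priv {
	struct my_tstats __percpu *tstats;
};

static int my_init(struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);	/* dev allocated with sizeof(*p) of priv space */

	p->tstats = alloc_percpu(struct my_tstats);	/* one counter block per possible CPU */
	return p->tstats ? 0 : -ENOMEM;
}

static void my_free(struct net_device *dev)	/* used as dev->destructor */
{
	struct my_priv *p = netdev_priv(dev);

	free_percpu(p->tstats);
	free_netdev(dev);
}

static void my_count_rx(struct net_device *dev, unsigned int len)
{
	struct my_priv *p = netdev_priv(dev);
	struct my_tstats *tstats = this_cpu_ptr(p->tstats);

	/* hot path: touch only this CPU's block, no lock, no shared cache line */
	tstats->rx_packets++;
	tstats->rx_bytes += len;
}

static struct net_device_stats *my_get_stats(struct net_device *dev)	/* .ndo_get_stats */
{
	struct my_priv *p = netdev_priv(dev);
	unsigned long packets = 0, bytes = 0;
	int i;

	/* rare path: fold every CPU's block into the shared netdev counters */
	for_each_possible_cpu(i) {
		const struct my_tstats *tstats = per_cpu_ptr(p->tstats, i);

		packets += tstats->rx_packets;
		bytes += tstats->rx_bytes;
	}
	dev->stats.rx_packets = packets;
	dev->stats.rx_bytes = bytes;
	return &dev->stats;
}

Writers never contend and never dirty a shared cache line; the cost is shifted to the rare ndo_get_stats path, which walks every possible CPU.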
@@ -169,12 +197,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 	return NULL;
 }
 
-static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
+static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
 			struct ip_tunnel_parm *parms)
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	unsigned h = 0;
+	unsigned int h = 0;
 	int prio = 0;
 
 	if (remote) {
@@ -188,7 +216,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
 	return &ipn->tunnels[prio][h];
 }
 
-static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
+static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
 			struct ip_tunnel *t)
 {
 	return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +224,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
 
 static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp;
-
-	for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
-		if (t == *tp) {
-			spin_lock_bh(&ipip_lock);
-			*tp = t->next;
-			spin_unlock_bh(&ipip_lock);
+	struct ip_tunnel __rcu **tp;
+	struct ip_tunnel *iter;
+
+	for (tp = ipip_bucket(ipn, t);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
 			break;
 		}
 	}
@@ -210,12 +239,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 
 static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp = ipip_bucket(ipn, t);
+	struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
 
-	spin_lock_bh(&ipip_lock);
-	t->next = *tp;
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
-	spin_unlock_bh(&ipip_lock);
 }
 
 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
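The link/unlink hunks above are what let the diff drop ipip_lock entirely, per the updated locking comment: lookups run under rcu_read_lock() and use rcu_dereference() (see for_each_ip_tunnel_rcu above), while link/unlink are only called with the RTNL held, so writers are already serialized and may load chain pointers with rtnl_dereference() (no read barrier needed) and publish them with rcu_assign_pointer(). A minimal sketch of the same discipline on a generic singly linked list; my_node and the my_* helpers are illustrative, not ipip.c symbols:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct my_node {
	struct my_node __rcu *next;	/* plays the role of ip_tunnel->next */
	int key;
};

static struct my_node __rcu *my_head;	/* plays the role of one hash bucket */

/* Writer side, caller holds the RTNL: rtnl_dereference() is safe because the
 * RTNL excludes concurrent writers; rcu_assign_pointer() orders the node's
 * initialisation against its publication to readers. */
static void my_link(struct my_node *n)
{
	rcu_assign_pointer(n->next, rtnl_dereference(my_head));
	rcu_assign_pointer(my_head, n);
}

static void my_unlink(struct my_node *n)
{
	struct my_node __rcu **tp;
	struct my_node *iter;

	for (tp = &my_head;
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (iter == n) {
			rcu_assign_pointer(*tp, n->next);
			break;
		}
	}
	/* a grace period (e.g. synchronize_net()) must elapse before n is freed */
}

/* Reader side, caller holds rcu_read_lock(): */
static struct my_node *my_lookup(int key)
{
	struct my_node *t;

	for (t = rcu_dereference(my_head); t; t = rcu_dereference(t->next))
		if (t->key == key)
			return t;
	return NULL;
}

This is also why the ioctl hunk further down adds synchronize_net() after ipip_tunnel_unlink(): the tunnel is about to be re-keyed and re-linked, and readers that found it under its old addresses must drain first.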
@@ -223,12 +250,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	struct ip_tunnel *t, **tp, *nt;
+	struct ip_tunnel *t, *nt;
+	struct ip_tunnel __rcu **tp;
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
+	for (tp = __ipip_bucket(ipn, parms);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next) {
 		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
 			return t;
 	}
@@ -238,7 +268,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	if (parms->name[0])
 		strlcpy(name, parms->name, IFNAMSIZ);
 	else
-		sprintf(name, "tunl%%d");
+		strcpy(name, "tunl%d");
 
 	dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
 	if (dev == NULL)
@@ -246,15 +276,11 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 
 	dev_net_set(dev, net);
 
-	if (strchr(name, '%')) {
-		if (dev_alloc_name(dev, name) < 0)
-			goto failed_free;
-	}
-
 	nt = netdev_priv(dev);
 	nt->parms = *parms;
 
-	ipip_tunnel_init(dev);
+	if (ipip_tunnel_init(dev) < 0)
+		goto failed_free;
 
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
@@ -264,20 +290,19 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	return nt;
 
 failed_free:
-	free_netdev(dev);
+	ipip_dev_free(dev);
 	return NULL;
 }
 
+/* called with RTNL */
 static void ipip_tunnel_uninit(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	if (dev == ipn->fb_tunnel_dev) {
-		spin_lock_bh(&ipip_lock);
-		ipn->tunnels_wc[0] = NULL;
-		spin_unlock_bh(&ipip_lock);
-	} else
+	if (dev == ipn->fb_tunnel_dev)
+		rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+	else
 		ipip_tunnel_unlink(ipn, netdev_priv(dev));
 	dev_put(dev);
 }
@@ -289,7 +314,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
    8 bytes of packet payload. It means, that precise relaying of
    ICMP in the real Internet is absolutely infeasible.
  */
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct ip_tunnel *t;
@@ -359,8 +384,10 @@ static int ipip_rcv(struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 
 	rcu_read_lock();
-	if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
-					iph->saddr, iph->daddr)) != NULL) {
+	tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+	if (tunnel != NULL) {
+		struct pcpu_tstats *tstats;
+
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 			rcu_read_unlock();
 			kfree_skb(skb);
@@ -374,10 +401,16 @@ static int ipip_rcv(struct sk_buff *skb)
 		skb->protocol = htons(ETH_P_IP);
 		skb->pkt_type = PACKET_HOST;
 
-		skb_tunnel_rx(skb, tunnel->dev);
+		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		tstats->rx_packets++;
+		tstats->rx_bytes += skb->len;
+
+		__skb_tunnel_rx(skb, tunnel->dev);
 
 		ipip_ecn_decapsulate(iph, skb);
+
 		netif_rx(skb);
+
 		rcu_read_unlock();
 		return 0;
 	}
@@ -394,52 +427,49 @@ static int ipip_rcv(struct sk_buff *skb)
 static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-	struct iphdr *tiph = &tunnel->parms.iph;
+	struct pcpu_tstats *tstats;
+	const struct iphdr *tiph = &tunnel->parms.iph;
 	u8 tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
-	struct iphdr *old_iph = ip_hdr(skb);
+	const struct iphdr *old_iph = ip_hdr(skb);
 	struct iphdr *iph;			/* Our new IP header */
 	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
+	struct flowi4 fl4;
 	int mtu;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto tx_error;
 
-	if (tos&1)
+	if (tos & 1)
 		tos = old_iph->tos;
 
 	if (!dst) {
 		/* NBMA tunnel */
 		if ((rt = skb_rtable(skb)) == NULL) {
-			stats->tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			goto tx_error;
 		}
 		if ((dst = rt->rt_gateway) == 0)
 			goto tx_error_icmp;
 	}
 
-	{
-		struct flowi fl = { .oif = tunnel->parms.link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = dst,
-						.saddr = tiph->saddr,
-						.tos = RT_TOS(tos) } },
-				    .proto = IPPROTO_IPIP };
-		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			stats->tx_carrier_errors++;
-			goto tx_error_icmp;
-		}
+	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
+				   dst, tiph->saddr,
+				   0, 0,
+				   IPPROTO_IPIP, RT_TOS(tos),
+				   tunnel->parms.link);
+	if (IS_ERR(rt)) {
+		dev->stats.tx_carrier_errors++;
+		goto tx_error_icmp;
 	}
 	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
-		stats->collisions++;
+		dev->stats.collisions++;
 		goto tx_error;
 	}
 
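The hunk above also carries the routing API conversion: the hand-built struct flowi key and ip_route_output_key() call are replaced by ip_route_output_ports(), which takes the addresses, ports, protocol, TOS and output interface directly, fills the caller's struct flowi4, and returns either a route or an error pointer, so failure is tested with IS_ERR() instead of a non-zero return code, and the outer header is later built from fl4.daddr/fl4.saddr rather than from rt->rt_dst/rt->rt_src. A condensed sketch of the call as the tunnel uses it (my_tunnel_route() is an illustrative wrapper, not an ipip.c function):

#include <linux/err.h>
#include <net/route.h>

static struct rtable *my_tunnel_route(struct net *net, struct flowi4 *fl4,
				      __be32 dst, __be32 src, u8 tos, int oif)
{
	struct rtable *rt;

	rt = ip_route_output_ports(net, fl4, NULL,	/* no socket */
				   dst, src,
				   0, 0,		/* no L4 ports for IPIP */
				   IPPROTO_IPIP, RT_TOS(tos),
				   oif);
	if (IS_ERR(rt))		/* the error is encoded in the returned pointer */
		return NULL;	/* caller bumps tx_carrier_errors and drops */

	/* fl4->saddr now holds the source address the route selected; the
	 * caller writes fl4->daddr/fl4->saddr into the encapsulating header */
	return rt;
}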
@@ -449,7 +479,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 
 	if (mtu < 68) {
-		stats->collisions++;
+		dev->stats.collisions++;
 		ip_rt_put(rt);
 		goto tx_error;
 	}
@@ -485,7 +515,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
 			ip_rt_put(rt);
-			txq->tx_dropped++;
+			dev->stats.tx_dropped++;
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
@@ -515,21 +545,21 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	iph->frag_off = df;
 	iph->protocol = IPPROTO_IPIP;
 	iph->tos = INET_ECN_encapsulate(tos, old_iph->tos);
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = fl4.daddr;
+	iph->saddr = fl4.saddr;
 
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl = old_iph->ttl;
 
 	nf_reset(skb);
-
-	IPTUNNEL_XMIT();
+	tstats = this_cpu_ptr(dev->tstats);
+	__IPTUNNEL_XMIT(tstats, &dev->stats);
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	stats->tx_errors++;
+	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -538,20 +568,22 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
 {
 	struct net_device *tdev = NULL;
 	struct ip_tunnel *tunnel;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 
 	tunnel = netdev_priv(dev);
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct flowi fl = { .oif = tunnel->parms.link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = iph->daddr,
-						.saddr = iph->saddr,
-						.tos = RT_TOS(iph->tos) } },
-				    .proto = IPPROTO_IPIP };
 		struct rtable *rt;
-		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
+		struct flowi4 fl4;
+
+		rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
+					   iph->daddr, iph->saddr,
+					   0, 0,
+					   IPPROTO_IPIP,
+					   RT_TOS(iph->tos),
+					   tunnel->parms.link);
+		if (!IS_ERR(rt)) {
 			tdev = rt->dst.dev;
 			ip_rt_put(rt);
 		}
@@ -627,6 +659,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
 			}
 			t = netdev_priv(dev);
 			ipip_tunnel_unlink(ipn, t);
+			synchronize_net();
 			t->parms.iph.saddr = p.iph.saddr;
 			t->parms.iph.daddr = p.iph.daddr;
 			memcpy(dev->dev_addr, &p.iph.saddr, 4);
@@ -696,13 +729,19 @@ static const struct net_device_ops ipip_netdev_ops = {
 	.ndo_start_xmit = ipip_tunnel_xmit,
 	.ndo_do_ioctl = ipip_tunnel_ioctl,
 	.ndo_change_mtu = ipip_tunnel_change_mtu,
-
+	.ndo_get_stats = ipip_get_stats,
 };
 
+static void ipip_dev_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+	free_netdev(dev);
+}
+
 static void ipip_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &ipip_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->destructor = ipip_dev_free;
 
 	dev->type = ARPHRD_TUNNEL;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -711,10 +750,11 @@ static void ipip_tunnel_setup(struct net_device *dev)
 	dev->iflink = 0;
 	dev->addr_len = 4;
 	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->features |= NETIF_F_LLTX;
 	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
-static void ipip_tunnel_init(struct net_device *dev)
+static int ipip_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
@@ -725,9 +765,15 @@ static void ipip_tunnel_init(struct net_device *dev)
 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
 
 	ipip_tunnel_bind_dev(dev);
+
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	return 0;
 }
 
-static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
+static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct iphdr *iph = &tunnel->parms.iph;
@@ -740,11 +786,16 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
 	iph->protocol = IPPROTO_IPIP;
 	iph->ihl = 5;
 
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
 	dev_hold(dev);
-	ipn->tunnels_wc[0] = tunnel;
+	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+	return 0;
 }
 
-static struct xfrm_tunnel ipip_handler = {
+static struct xfrm_tunnel ipip_handler __read_mostly = {
 	.handler = ipip_rcv,
 	.err_handler = ipip_err,
 	.priority = 1,
@@ -760,11 +811,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t = ipn->tunnels[prio][h];
+			struct ip_tunnel *t;
 
+			t = rtnl_dereference(ipn->tunnels[prio][h]);
 			while (t != NULL) {
 				unregister_netdevice_queue(t->dev, head);
-				t = t->next;
+				t = rtnl_dereference(t->next);
 			}
 		}
 	}
@@ -789,7 +841,9 @@ static int __net_init ipip_init_net(struct net *net)
 	}
 	dev_net_set(ipn->fb_tunnel_dev, net);
 
-	ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+	err = ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+	if (err)
+		goto err_reg_dev;
 
 	if ((err = register_netdev(ipn->fb_tunnel_dev)))
 		goto err_reg_dev;
@@ -797,7 +851,7 @@ static int __net_init ipip_init_net(struct net *net)
 	return 0;
 
 err_reg_dev:
-	free_netdev(ipn->fb_tunnel_dev);
+	ipip_dev_free(ipn->fb_tunnel_dev);
 err_alloc_dev:
 	/* nothing */
 	return err;
@@ -850,3 +904,4 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETDEV("tunl0");