Diffstat (limited to 'net/ipv4/ipip.c')
-rw-r--r--	net/ipv4/ipip.c	212
1 file changed, 141 insertions(+), 71 deletions(-)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec036731a70b..e9b816e6cd73 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -122,31 +122,59 @@
 
 static int ipip_net_id __read_mostly;
 struct ipip_net {
-	struct ip_tunnel *tunnels_r_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_r[HASH_SIZE];
-	struct ip_tunnel *tunnels_l[HASH_SIZE];
-	struct ip_tunnel *tunnels_wc[1];
-	struct ip_tunnel **tunnels[4];
+	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+	struct ip_tunnel __rcu *tunnels_wc[1];
+	struct ip_tunnel __rcu **tunnels[4];
 
 	struct net_device *fb_tunnel_dev;
 };
 
-static void ipip_tunnel_init(struct net_device *dev);
+static int ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);
+static void ipip_dev_free(struct net_device *dev);
 
 /*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
  */
-static DEFINE_SPINLOCK(ipip_lock);
 
 #define for_each_ip_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+	unsigned long rx_packets;
+	unsigned long rx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+{
+	struct pcpu_tstats sum = { 0 };
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+		sum.rx_packets += tstats->rx_packets;
+		sum.rx_bytes += tstats->rx_bytes;
+		sum.tx_packets += tstats->tx_packets;
+		sum.tx_bytes += tstats->tx_bytes;
+	}
+	dev->stats.rx_packets = sum.rx_packets;
+	dev->stats.rx_bytes = sum.rx_bytes;
+	dev->stats.tx_packets = sum.tx_packets;
+	dev->stats.tx_bytes = sum.tx_bytes;
+	return &dev->stats;
+}
+
 static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 		__be32 remote, __be32 local)
 {
-	unsigned h0 = HASH(remote);
-	unsigned h1 = HASH(local);
+	unsigned int h0 = HASH(remote);
+	unsigned int h1 = HASH(local);
 	struct ip_tunnel *t;
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
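A minimal, hypothetical stand-alone sketch of the per-CPU accounting pattern the hunk above introduces: allocate counters with alloc_percpu(), bump only the local CPU's copy on the hot path, and fold all copies together on demand. Every demo_* identifier below is invented for illustration and is not part of the patch.

/* Hypothetical illustration only; not part of net/ipv4/ipip.c. */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/string.h>

struct demo_tstats {
	unsigned long rx_packets;
	unsigned long rx_bytes;
};

static struct demo_tstats __percpu *demo_stats;

/* Hot path: touch only this CPU's copy, no lock and no atomics.
 * The driver does the equivalent from rcv/xmit contexts that cannot
 * migrate; this demo pins itself with preempt_disable() instead.
 */
static void demo_count_rx(unsigned int len)
{
	struct demo_tstats *s = this_cpu_ptr(demo_stats);

	s->rx_packets++;
	s->rx_bytes += len;
}

/* Slow path: fold every CPU's copy into a single total on demand. */
static void demo_fold(struct demo_tstats *sum)
{
	int cpu;

	memset(sum, 0, sizeof(*sum));
	for_each_possible_cpu(cpu) {
		const struct demo_tstats *s = per_cpu_ptr(demo_stats, cpu);

		sum->rx_packets += s->rx_packets;
		sum->rx_bytes += s->rx_bytes;
	}
}

static int __init demo_init(void)
{
	demo_stats = alloc_percpu(struct demo_tstats);
	if (!demo_stats)
		return -ENOMEM;

	preempt_disable();
	demo_count_rx(1500);
	preempt_enable();
	return 0;
}

static void __exit demo_exit(void)
{
	struct demo_tstats sum;

	demo_fold(&sum);
	pr_info("demo: %lu packets, %lu bytes\n", sum.rx_packets, sum.rx_bytes);
	free_percpu(demo_stats);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the patch itself the allocation happens in ipip_tunnel_init() and ipip_fb_tunnel_init(), the per-packet updates in ipip_rcv() and ipip_tunnel_xmit(), the fold in ipip_get_stats(), and the free in ipip_dev_free().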
@@ -169,12 +197,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
 	return NULL;
 }
 
-static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
+static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
 		struct ip_tunnel_parm *parms)
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	unsigned h = 0;
+	unsigned int h = 0;
 	int prio = 0;
 
 	if (remote) {
@@ -188,7 +216,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
 	return &ipn->tunnels[prio][h];
 }
 
-static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
+static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
 		struct ip_tunnel *t)
 {
 	return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +224,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
 
 static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp;
-
-	for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
-		if (t == *tp) {
-			spin_lock_bh(&ipip_lock);
-			*tp = t->next;
-			spin_unlock_bh(&ipip_lock);
+	struct ip_tunnel __rcu **tp;
+	struct ip_tunnel *iter;
+
+	for (tp = ipip_bucket(ipn, t);
+	     (iter = rtnl_dereference(*tp)) != NULL;
+	     tp = &iter->next) {
+		if (t == iter) {
+			rcu_assign_pointer(*tp, t->next);
 			break;
 		}
 	}
@@ -210,12 +239,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
 
 static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
 {
-	struct ip_tunnel **tp = ipip_bucket(ipn, t);
+	struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
 
-	spin_lock_bh(&ipip_lock);
-	t->next = *tp;
+	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
-	spin_unlock_bh(&ipip_lock);
 }
 
 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
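A hypothetical stand-alone sketch of the locking scheme these hunks switch to: writers are already serialized by the RTNL mutex, so the ipip_lock spinlock can go away; updates are published with rcu_assign_pointer(), the writer re-reads its own pointers with rtnl_dereference(), and lockless readers walk the chain under rcu_read_lock() with rcu_dereference(). The demo_* identifiers are invented for illustration and are not part of the patch.

/* Hypothetical illustration only; not part of net/ipv4/ipip.c. */
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct demo_node {
	struct demo_node __rcu *next;
	int key;
};

static struct demo_node __rcu *demo_head;

/* Writer side: the caller holds RTNL, so rtnl_dereference() is enough
 * to read the pointers it is about to modify.
 */
static void demo_link(struct demo_node *n)
{
	ASSERT_RTNL();
	rcu_assign_pointer(n->next, rtnl_dereference(demo_head));
	rcu_assign_pointer(demo_head, n);
}

static void demo_unlink(struct demo_node *n)
{
	struct demo_node __rcu **tp = &demo_head;
	struct demo_node *iter;

	ASSERT_RTNL();
	while ((iter = rtnl_dereference(*tp)) != NULL) {
		if (iter == n) {
			/* Unpublish the node; readers already inside
			 * rcu_read_lock() may keep using it until a
			 * grace period has elapsed.
			 */
			rcu_assign_pointer(*tp, rtnl_dereference(n->next));
			return;
		}
		tp = &iter->next;
	}
}

/* Reader side: the caller holds rcu_read_lock(); no spinlock needed. */
static struct demo_node *demo_lookup(int key)
{
	struct demo_node *t;

	for (t = rcu_dereference(demo_head); t; t = rcu_dereference(t->next))
		if (t->key == key)
			return t;
	return NULL;
}

In the patch, ipip_tunnel_link() and ipip_tunnel_unlink() play the writer role under RTNL, while ipip_tunnel_lookup() walks the hash chains under rcu_read_lock() via for_each_ip_tunnel_rcu().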
@@ -223,12 +250,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 {
 	__be32 remote = parms->iph.daddr;
 	__be32 local = parms->iph.saddr;
-	struct ip_tunnel *t, **tp, *nt;
+	struct ip_tunnel *t, *nt;
+	struct ip_tunnel __rcu **tp;
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
+	for (tp = __ipip_bucket(ipn, parms);
+	     (t = rtnl_dereference(*tp)) != NULL;
+	     tp = &t->next) {
 		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
 			return t;
 	}
@@ -238,7 +268,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	if (parms->name[0])
 		strlcpy(name, parms->name, IFNAMSIZ);
 	else
-		sprintf(name, "tunl%%d");
+		strcpy(name, "tunl%d");
 
 	dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
 	if (dev == NULL)
@@ -254,7 +284,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	nt = netdev_priv(dev);
 	nt->parms = *parms;
 
-	ipip_tunnel_init(dev);
+	if (ipip_tunnel_init(dev) < 0)
+		goto failed_free;
 
 	if (register_netdevice(dev) < 0)
 		goto failed_free;
@@ -264,20 +295,19 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 	return nt;
 
 failed_free:
-	free_netdev(dev);
+	ipip_dev_free(dev);
 	return NULL;
 }
 
+/* called with RTNL */
 static void ipip_tunnel_uninit(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 
-	if (dev == ipn->fb_tunnel_dev) {
-		spin_lock_bh(&ipip_lock);
-		ipn->tunnels_wc[0] = NULL;
-		spin_unlock_bh(&ipip_lock);
-	} else
+	if (dev == ipn->fb_tunnel_dev)
+		rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+	else
 		ipip_tunnel_unlink(ipn, netdev_priv(dev));
 	dev_put(dev);
 }
@@ -359,8 +389,10 @@ static int ipip_rcv(struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 
 	rcu_read_lock();
-	if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
-					iph->saddr, iph->daddr)) != NULL) {
+	tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+	if (tunnel != NULL) {
+		struct pcpu_tstats *tstats;
+
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 			rcu_read_unlock();
 			kfree_skb(skb);
@@ -374,10 +406,16 @@ static int ipip_rcv(struct sk_buff *skb)
 		skb->protocol = htons(ETH_P_IP);
 		skb->pkt_type = PACKET_HOST;
 
-		skb_tunnel_rx(skb, tunnel->dev);
+		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		tstats->rx_packets++;
+		tstats->rx_bytes += skb->len;
+
+		__skb_tunnel_rx(skb, tunnel->dev);
 
 		ipip_ecn_decapsulate(iph, skb);
+
 		netif_rx(skb);
+
 		rcu_read_unlock();
 		return 0;
 	}
@@ -394,13 +432,12 @@ static int ipip_rcv(struct sk_buff *skb)
 static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	struct pcpu_tstats *tstats;
 	struct iphdr *tiph = &tunnel->parms.iph;
 	u8 tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr *old_iph = ip_hdr(skb);
 	struct iphdr *iph;			/* Our new IP header */
 	unsigned int max_headroom;		/* The extra header space needed */
@@ -410,13 +447,13 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (skb->protocol != htons(ETH_P_IP))
 		goto tx_error;
 
-	if (tos&1)
+	if (tos & 1)
 		tos = old_iph->tos;
 
 	if (!dst) {
 		/* NBMA tunnel */
 		if ((rt = skb_rtable(skb)) == NULL) {
-			stats->tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			goto tx_error;
 		}
 		if ((dst = rt->rt_gateway) == 0)
@@ -424,14 +461,20 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	{
-		struct flowi fl = { .oif = tunnel->parms.link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = dst,
-						.saddr = tiph->saddr,
-						.tos = RT_TOS(tos) } },
-				    .proto = IPPROTO_IPIP };
+		struct flowi fl = {
+			.oif = tunnel->parms.link,
+			.nl_u = {
+				.ip4_u = {
+					.daddr = dst,
+					.saddr = tiph->saddr,
+					.tos = RT_TOS(tos)
+				}
+			},
+			.proto = IPPROTO_IPIP
+		};
+
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			stats->tx_carrier_errors++;
+			dev->stats.tx_carrier_errors++;
 			goto tx_error_icmp;
 		}
 	}
@@ -439,7 +482,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
-		stats->collisions++;
+		dev->stats.collisions++;
 		goto tx_error;
 	}
 
@@ -449,7 +492,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 
 	if (mtu < 68) {
-		stats->collisions++;
+		dev->stats.collisions++;
 		ip_rt_put(rt);
 		goto tx_error;
 	}
@@ -485,7 +528,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
 			ip_rt_put(rt);
-			txq->tx_dropped++;
+			dev->stats.tx_dropped++;
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
@@ -522,14 +565,14 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	iph->ttl = old_iph->ttl;
 
 	nf_reset(skb);
-
-	IPTUNNEL_XMIT();
+	tstats = this_cpu_ptr(dev->tstats);
+	__IPTUNNEL_XMIT(tstats, &dev->stats);
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	stats->tx_errors++;
+	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -544,13 +587,19 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct flowi fl = { .oif = tunnel->parms.link,
-				    .nl_u = { .ip4_u =
-					      { .daddr = iph->daddr,
-						.saddr = iph->saddr,
-						.tos = RT_TOS(iph->tos) } },
-				    .proto = IPPROTO_IPIP };
+		struct flowi fl = {
+			.oif = tunnel->parms.link,
+			.nl_u = {
+				.ip4_u = {
+					.daddr = iph->daddr,
+					.saddr = iph->saddr,
+					.tos = RT_TOS(iph->tos)
+				}
+			},
+			.proto = IPPROTO_IPIP
+		};
 		struct rtable *rt;
+
 		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
 			tdev = rt->dst.dev;
 			ip_rt_put(rt);
@@ -696,13 +745,19 @@ static const struct net_device_ops ipip_netdev_ops = {
 	.ndo_start_xmit	= ipip_tunnel_xmit,
 	.ndo_do_ioctl	= ipip_tunnel_ioctl,
 	.ndo_change_mtu	= ipip_tunnel_change_mtu,
-
+	.ndo_get_stats	= ipip_get_stats,
 };
 
+static void ipip_dev_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+	free_netdev(dev);
+}
+
 static void ipip_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops		= &ipip_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->destructor		= ipip_dev_free;
 
 	dev->type		= ARPHRD_TUNNEL;
 	dev->hard_header_len	= LL_MAX_HEADER + sizeof(struct iphdr);
@@ -711,10 +766,11 @@ static void ipip_tunnel_setup(struct net_device *dev)
 	dev->iflink		= 0;
 	dev->addr_len		= 4;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
+	dev->features		|= NETIF_F_LLTX;
 	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
 }
 
-static void ipip_tunnel_init(struct net_device *dev)
+static int ipip_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
@@ -725,9 +781,15 @@ static void ipip_tunnel_init(struct net_device *dev)
 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
 
 	ipip_tunnel_bind_dev(dev);
+
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	return 0;
 }
 
-static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
+static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct iphdr *iph = &tunnel->parms.iph;
@@ -740,11 +802,16 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
 	iph->protocol		= IPPROTO_IPIP;
 	iph->ihl		= 5;
 
+	dev->tstats = alloc_percpu(struct pcpu_tstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
 	dev_hold(dev);
-	ipn->tunnels_wc[0]	= tunnel;
+	rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+	return 0;
 }
 
-static struct xfrm_tunnel ipip_handler = {
+static struct xfrm_tunnel ipip_handler __read_mostly = {
 	.handler	=	ipip_rcv,
 	.err_handler	=	ipip_err,
 	.priority	=	1,
@@ -760,11 +827,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t = ipn->tunnels[prio][h];
+			struct ip_tunnel *t;
 
+			t = rtnl_dereference(ipn->tunnels[prio][h]);
 			while (t != NULL) {
 				unregister_netdevice_queue(t->dev, head);
-				t = t->next;
+				t = rtnl_dereference(t->next);
 			}
 		}
 	}
@@ -789,7 +857,9 @@ static int __net_init ipip_init_net(struct net *net)
 	}
 	dev_net_set(ipn->fb_tunnel_dev, net);
 
-	ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+	err = ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+	if (err)
+		goto err_reg_dev;
 
 	if ((err = register_netdev(ipn->fb_tunnel_dev)))
 		goto err_reg_dev;
@@ -797,7 +867,7 @@ static int __net_init ipip_init_net(struct net *net)
 	return 0;
 
 err_reg_dev:
-	free_netdev(ipn->fb_tunnel_dev);
+	ipip_dev_free(ipn->fb_tunnel_dev);
 err_alloc_dev:
 	/* nothing */
 	return err;