Diffstat (limited to 'net/ipv4/route.c')
 net/ipv4/route.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2941ef21f203..7768d718e199 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -284,7 +284,7 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
                 rcu_read_lock_bh();
                 r = rcu_dereference(rt_hash_table[st->bucket].chain);
                 while (r) {
-                        if (r->u.dst.dev->nd_net == st->p.net &&
+                        if (dev_net(r->u.dst.dev) == st->p.net &&
                             r->rt_genid == st->genid)
                                 return r;
                         r = rcu_dereference(r->u.dst.rt_next);
@@ -312,7 +312,7 @@ static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st,
                                         struct rtable *r)
 {
         while ((r = __rt_cache_get_next(st, r)) != NULL) {
-                if (r->u.dst.dev->nd_net != st->p.net)
+                if (dev_net(r->u.dst.dev) != st->p.net)
                         continue;
                 if (r->rt_genid == st->genid)
                         break;
@@ -680,7 +680,7 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 {
-        return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net;
+        return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
 }
 
 /*
@@ -1164,7 +1164,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
         if (!in_dev)
                 return;
 
-        net = dev->nd_net;
+        net = dev_net(dev);
         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
             || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
             || ipv4_is_zeronet(new_gw))
@@ -1195,7 +1195,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                     rth->fl.oif != ikeys[k] ||
                                     rth->fl.iif != 0 ||
                                     rth->rt_genid != atomic_read(&rt_genid) ||
-                                    rth->u.dst.dev->nd_net != net) {
+                                    dev_net(rth->u.dst.dev) != net) {
                                         rthp = &rth->u.dst.rt_next;
                                         continue;
                                 }
@@ -1454,7 +1454,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                     rth->rt_src == iph->saddr &&
                                     rth->fl.iif == 0 &&
                                     !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
-                                    rth->u.dst.dev->nd_net == net &&
+                                    dev_net(rth->u.dst.dev) == net &&
                                     rth->rt_genid == atomic_read(&rt_genid)) {
                                         unsigned short mtu = new_mtu;
 
@@ -1530,9 +1530,9 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 {
         struct rtable *rt = (struct rtable *) dst;
         struct in_device *idev = rt->idev;
-        if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) {
+        if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
                 struct in_device *loopback_idev =
-                        in_dev_get(dev->nd_net->loopback_dev);
+                        in_dev_get(dev_net(dev)->loopback_dev);
                 if (loopback_idev) {
                         rt->idev = loopback_idev;
                         in_dev_put(idev);
@@ -1576,7 +1576,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 
         if (rt->fl.iif == 0)
                 src = rt->rt_src;
-        else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) {
+        else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
                 src = FIB_RES_PREFSRC(res);
                 fib_res_put(&res);
         } else
@@ -1900,7 +1900,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         __be32 spec_dst;
         int err = -EINVAL;
         int free_res = 0;
-        struct net * net = dev->nd_net;
+        struct net * net = dev_net(dev);
 
         /* IP on this device is disabled. */
 
@@ -2071,7 +2071,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         int iif = dev->ifindex;
         struct net *net;
 
-        net = dev->nd_net;
+        net = dev_net(dev);
         tos &= IPTOS_RT_MASK;
         hash = rt_hash(daddr, saddr, iif);
 
@@ -2084,7 +2084,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                     rth->fl.oif == 0 &&
                     rth->fl.mark == skb->mark &&
                     rth->fl.fl4_tos == tos &&
-                    rth->u.dst.dev->nd_net == net &&
+                    dev_net(rth->u.dst.dev) == net &&
                     rth->rt_genid == atomic_read(&rt_genid)) {
                         dst_use(&rth->u.dst, jiffies);
                         RT_CACHE_STAT_INC(in_hit);
@@ -2486,7 +2486,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
                     rth->fl.mark == flp->mark &&
                     !((rth->fl.fl4_tos ^ flp->fl4_tos) &
                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
-                    rth->u.dst.dev->nd_net == net &&
+                    dev_net(rth->u.dst.dev) == net &&
                     rth->rt_genid == atomic_read(&rt_genid)) {
                         dst_use(&rth->u.dst, jiffies);
                         RT_CACHE_STAT_INC(out_hit);
@@ -2795,7 +2795,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
                 rcu_read_lock_bh();
                 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
                      rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
-                        if (rt->u.dst.dev->nd_net != net || idx < s_idx)
+                        if (dev_net(rt->u.dst.dev) != net || idx < s_idx)
                                 continue;
                         if (rt->rt_genid != atomic_read(&rt_genid))
                                 continue;