author      Denis V. Lunev <den@openvz.org>        2008-07-05 22:04:32 -0400
committer   David S. Miller <davem@davemloft.net>  2008-07-05 22:04:32 -0400
commit      e84f84f276473dcc673f360e8ff3203148bdf0e2 (patch)
tree        c7ea9b10807acef5fb9c636d2b8b74204d32a37e /net/ipv4
parent      b00180defdeeac8e07e3dc02e53e7395d42bbd19 (diff)
netns: place rt_genid into struct net
Signed-off-by: Denis V. Lunev <den@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
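
Here rt_genid is the generation stamp that validates IPv4 route cache entries; moving it from a file-scope atomic in net/ipv4/route.c into struct net gives every network namespace its own counter, so invalidating one namespace's cache no longer flushes every other namespace's entries. The counter field itself lands in struct netns_ipv4, whose header (include/net/netns/ipv4.h) sits outside this diffstat, which is limited to net/ipv4; the sketch below shows the assumed shape of that side of the change, with the field's position purely illustrative:

    /* Assumed header-side companion change (include/net/netns/ipv4.h is
     * not covered by this diffstat); field placement is illustrative. */
    struct netns_ipv4 {
            /* ... existing per-namespace IPv4 state ... */
            atomic_t        rt_genid;       /* route cache generation stamp */
    };

Everything in route.c then reads the counter through the rt_genid(net) accessor added below instead of touching a global.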
Diffstat (limited to 'net/ipv4')
-rw-r--r--      net/ipv4/route.c        76
1 file changed, 43 insertions(+), 33 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e4e37edbad6..67c3ed772c2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -250,7 +250,6 @@ static inline void rt_hash_lock_init(void)
 static struct rt_hash_bucket *rt_hash_table __read_mostly;
 static unsigned rt_hash_mask __read_mostly;
 static unsigned int rt_hash_log __read_mostly;
-static atomic_t rt_genid __read_mostly;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
@@ -265,6 +264,11 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
                 & rt_hash_mask;
 }
 
+static inline int rt_genid(struct net *net)
+{
+        return atomic_read(&net->ipv4.rt_genid);
+}
+
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
         struct seq_net_private p;
@@ -334,7 +338,7 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
         struct rt_cache_iter_state *st = seq->private;
         if (*pos)
                 return rt_cache_get_idx(seq, *pos - 1);
-        st->genid = atomic_read(&rt_genid);
+        st->genid = rt_genid(seq_file_net(seq));
         return SEQ_START_TOKEN;
 }
 
@@ -681,6 +685,11 @@ static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
         return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
 }
 
+static inline int rt_is_expired(struct rtable *rth)
+{
+        return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+}
+
 /*
  * Perform a full scan of hash table and free all entries.
  * Can be called by a softirq or a process.
@@ -736,7 +745,7 @@ static void rt_check_expire(void)
                         continue;
                 spin_lock_bh(rt_hash_lock_addr(i));
                 while ((rth = *rthp) != NULL) {
-                        if (rth->rt_genid != atomic_read(&rt_genid)) {
+                        if (rt_is_expired(rth)) {
                                 *rthp = rth->u.dst.rt_next;
                                 rt_free(rth);
                                 continue;
@@ -784,7 +793,7 @@ static void rt_cache_invalidate(struct net *net)
         unsigned char shuffle;
 
         get_random_bytes(&shuffle, sizeof(shuffle));
-        atomic_add(shuffle + 1U, &rt_genid);
+        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
 }
 
 /*
@@ -881,7 +890,7 @@ static int rt_garbage_collect(struct dst_ops *ops)
                         rthp = &rt_hash_table[k].chain;
                         spin_lock_bh(rt_hash_lock_addr(k));
                         while ((rth = *rthp) != NULL) {
-                                if (rth->rt_genid == atomic_read(&rt_genid) &&
+                                if (!rt_is_expired(rth) &&
                                         !rt_may_expire(rth, tmo, expire)) {
                                         tmo >>= 1;
                                         rthp = &rth->u.dst.rt_next;
@@ -963,7 +972,7 @@ restart:
 
         spin_lock_bh(rt_hash_lock_addr(hash));
         while ((rth = *rthp) != NULL) {
-                if (rth->rt_genid != atomic_read(&rt_genid)) {
+                if (rt_is_expired(rth)) {
                         *rthp = rth->u.dst.rt_next;
                         rt_free(rth);
                         continue;
@@ -1139,7 +1148,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
         spin_lock_bh(rt_hash_lock_addr(hash));
         ip_rt_put(rt);
         while ((aux = *rthp) != NULL) {
-                if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) {
+                if (aux == rt || rt_is_expired(aux)) {
                         *rthp = aux->u.dst.rt_next;
                         rt_free(aux);
                         continue;
@@ -1182,7 +1191,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
         for (i = 0; i < 2; i++) {
                 for (k = 0; k < 2; k++) {
                         unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-                                                atomic_read(&rt_genid));
+                                                rt_genid(net));
 
                         rthp=&rt_hash_table[hash].chain;
 
@@ -1194,7 +1203,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                             rth->fl.fl4_src != skeys[i] ||
                             rth->fl.oif != ikeys[k] ||
                             rth->fl.iif != 0 ||
-                            rth->rt_genid != atomic_read(&rt_genid) ||
+                            rt_is_expired(rth) ||
                             !net_eq(dev_net(rth->u.dst.dev), net)) {
                                 rthp = &rth->u.dst.rt_next;
                                 continue;
@@ -1233,7 +1242,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                 rt->u.dst.neighbour = NULL;
                                 rt->u.dst.hh = NULL;
                                 rt->u.dst.xfrm = NULL;
-                                rt->rt_genid = atomic_read(&rt_genid);
+                                rt->rt_genid = rt_genid(net);
                                 rt->rt_flags |= RTCF_REDIRECTED;
 
                                 /* Gateway is different ... */
@@ -1298,7 +1307,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
                            rt->u.dst.expires) {
                         unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
                                                 rt->fl.oif,
-                                                atomic_read(&rt_genid));
+                                                rt_genid(dev_net(dst->dev)));
 #if RT_CACHE_DEBUG >= 1
                         printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
                                           NIPQUAD_FMT "/%02x dropped\n",
@@ -1448,7 +1457,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
         for (k = 0; k < 2; k++) {
                 for (i = 0; i < 2; i++) {
                         unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-                                                atomic_read(&rt_genid));
+                                                rt_genid(net));
 
                         rcu_read_lock();
                         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1463,7 +1472,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                     rth->fl.iif != 0 ||
                                     dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
                                     !net_eq(dev_net(rth->u.dst.dev), net) ||
-                                    rth->rt_genid != atomic_read(&rt_genid))
+                                    rt_is_expired(rth))
                                         continue;
 
                                 if (new_mtu < 68 || new_mtu >= old_mtu) {
@@ -1698,7 +1707,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         rth->fl.oif     = 0;
         rth->rt_gateway = daddr;
         rth->rt_spec_dst= spec_dst;
-        rth->rt_genid   = atomic_read(&rt_genid);
+        rth->rt_genid   = rt_genid(dev_net(dev));
         rth->rt_flags   = RTCF_MULTICAST;
         rth->rt_type    = RTN_MULTICAST;
         if (our) {
@@ -1713,7 +1722,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         RT_CACHE_STAT_INC(in_slow_mc);
 
         in_dev_put(in_dev);
-        hash = rt_hash(daddr, saddr, dev->ifindex, atomic_read(&rt_genid));
+        hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
         return rt_intern_hash(hash, rth, &skb->rtable);
 
 e_nobufs:
@@ -1839,7 +1848,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
         rth->u.dst.input = ip_forward;
         rth->u.dst.output = ip_output;
-        rth->rt_genid = atomic_read(&rt_genid);
+        rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
 
         rt_set_nexthop(rth, res, itag);
 
@@ -1874,7 +1883,8 @@ static int ip_mkroute_input(struct sk_buff *skb,
                 return err;
 
         /* put it into the cache */
-        hash = rt_hash(daddr, saddr, fl->iif, atomic_read(&rt_genid));
+        hash = rt_hash(daddr, saddr, fl->iif,
+                       rt_genid(dev_net(rth->u.dst.dev)));
         return rt_intern_hash(hash, rth, &skb->rtable);
 }
 
@@ -2000,7 +2010,7 @@ local_input:
                 goto e_nobufs;
 
         rth->u.dst.output= ip_rt_bug;
-        rth->rt_genid = atomic_read(&rt_genid);
+        rth->rt_genid = rt_genid(net);
 
         atomic_set(&rth->u.dst.__refcnt, 1);
         rth->u.dst.flags= DST_HOST;
@@ -2030,7 +2040,7 @@ local_input:
                 rth->rt_flags &= ~RTCF_LOCAL;
         }
         rth->rt_type    = res.type;
-        hash = rt_hash(daddr, saddr, fl.iif, atomic_read(&rt_genid));
+        hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
         err = rt_intern_hash(hash, rth, &skb->rtable);
         goto done;
 
@@ -2081,7 +2091,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
         net = dev_net(dev);
         tos &= IPTOS_RT_MASK;
-        hash = rt_hash(daddr, saddr, iif, atomic_read(&rt_genid));
+        hash = rt_hash(daddr, saddr, iif, rt_genid(net));
 
         rcu_read_lock();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2093,7 +2103,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                      (rth->fl.fl4_tos ^ tos)) == 0 &&
                     rth->fl.mark == skb->mark &&
                     net_eq(dev_net(rth->u.dst.dev), net) &&
-                    rth->rt_genid == atomic_read(&rt_genid)) {
+                    !rt_is_expired(rth)) {
                         dst_use(&rth->u.dst, jiffies);
                         RT_CACHE_STAT_INC(in_hit);
                         rcu_read_unlock();
@@ -2221,7 +2231,7 @@ static int __mkroute_output(struct rtable **result,
         rth->rt_spec_dst= fl->fl4_src;
 
         rth->u.dst.output=ip_output;
-        rth->rt_genid = atomic_read(&rt_genid);
+        rth->rt_genid = rt_genid(dev_net(dev_out));
 
         RT_CACHE_STAT_INC(out_slow_tot);
 
@@ -2271,7 +2281,7 @@ static int ip_mkroute_output(struct rtable **rp,
         unsigned hash;
         if (err == 0) {
                 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
-                               atomic_read(&rt_genid));
+                               rt_genid(dev_net(dev_out)));
                 err = rt_intern_hash(hash, rth, rp);
         }
 
@@ -2483,8 +2493,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
         unsigned hash;
         struct rtable *rth;
 
-        hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif,
-                       atomic_read(&rt_genid));
+        hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
 
         rcu_read_lock_bh();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2497,7 +2506,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
                     !((rth->fl.fl4_tos ^ flp->fl4_tos) &
                             (IPTOS_RT_MASK | RTO_ONLINK)) &&
                     net_eq(dev_net(rth->u.dst.dev), net) &&
-                    rth->rt_genid == atomic_read(&rt_genid)) {
+                    !rt_is_expired(rth)) {
                         dst_use(&rth->u.dst, jiffies);
                         RT_CACHE_STAT_INC(out_hit);
                         rcu_read_unlock_bh();
@@ -2528,7 +2537,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 };
 
 
-static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp)
+static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
 {
         struct rtable *ort = *rp;
         struct rtable *rt = (struct rtable *)
@@ -2552,7 +2561,7 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp)
                 rt->idev = ort->idev;
                 if (rt->idev)
                         in_dev_hold(rt->idev);
-                rt->rt_genid = atomic_read(&rt_genid);
+                rt->rt_genid = rt_genid(net);
                 rt->rt_flags = ort->rt_flags;
                 rt->rt_type = ort->rt_type;
                 rt->rt_dst = ort->rt_dst;
@@ -2588,7 +2597,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
                 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
                                     flags ? XFRM_LOOKUP_WAIT : 0);
                 if (err == -EREMOTE)
-                        err = ipv4_dst_blackhole(rp, flp);
+                        err = ipv4_dst_blackhole(net, rp, flp);
 
                 return err;
         }
@@ -2807,7 +2816,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
                      rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
                         if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
                                 continue;
-                        if (rt->rt_genid != atomic_read(&rt_genid))
+                        if (rt_is_expired(rt))
                                 continue;
                         skb->dst = dst_clone(&rt->u.dst);
                         if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -3081,6 +3090,10 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
 
 static __net_init int rt_secret_timer_init(struct net *net)
 {
+        atomic_set(&net->ipv4.rt_genid,
+                        (int) ((num_physpages ^ (num_physpages>>8)) ^
+                        (jiffies ^ (jiffies >> 7))));
+
         net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
         net->ipv4.rt_secret_timer.data = (unsigned long)net;
         init_timer_deferrable(&net->ipv4.rt_secret_timer);
@@ -3121,9 +3134,6 @@ int __init ip_rt_init(void)
 {
         int rc = 0;
 
-        atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^
-                             (jiffies ^ (jiffies >> 7))));
-
 #ifdef CONFIG_NET_CLS_ROUTE
         ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
         if (!ip_rt_acct)
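
Taken together, the hunks apply a generation-count invalidation pattern, now scoped per namespace: every cached rtable records rt_genid(net) at creation, rt_cache_invalidate() advances that namespace's counter by a random step of at least 1 (shuffle + 1U, so the value always changes), and lookups lazily skip any entry whose stamp no longer matches via rt_is_expired(). A minimal, self-contained userspace sketch of the same pattern (illustrative names only, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_net    { atomic_int rt_genid; };   /* stand-in for struct net */
    struct fake_rtable { int rt_genid; };          /* stand-in for struct rtable */

    /* Mirrors rt_is_expired(): stale once the per-net counter has moved on. */
    static int fake_rt_is_expired(struct fake_net *net, struct fake_rtable *rth)
    {
            return rth->rt_genid != atomic_load(&net->rt_genid);
    }

    int main(void)
    {
            struct fake_net net;
            atomic_init(&net.rt_genid, 1);

            /* A new entry stamps the current generation, as __mkroute_input()
             * and friends do with rth->rt_genid = rt_genid(...). */
            struct fake_rtable rth = { .rt_genid = atomic_load(&net.rt_genid) };
            printf("expired before invalidate: %d\n", fake_rt_is_expired(&net, &rth));

            /* rt_cache_invalidate() analogue: bump the counter and every entry
             * stamped with the old value goes stale at once. */
            atomic_fetch_add(&net.rt_genid, 1);
            printf("expired after invalidate:  %d\n", fake_rt_is_expired(&net, &rth));
            return 0;
    }

Because each fake_net carries its own counter, bumping one namespace's rt_genid leaves entries stamped against any other namespace valid, which is exactly the isolation this patch buys.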