Diffstat (limited to 'net/ipv4/route.c')
 net/ipv4/route.c | 97 ++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 65 insertions(+), 32 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7b5e8e1d94be..1051326c36b2 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -273,6 +273,7 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr)
 
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
+	struct seq_net_private p;
 	int bucket;
 	int genid;
 };
@@ -285,7 +286,8 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
 		rcu_read_lock_bh();
 		r = rcu_dereference(rt_hash_table[st->bucket].chain);
 		while (r) {
-			if (r->rt_genid == st->genid)
+			if (r->u.dst.dev->nd_net == st->p.net &&
+			    r->rt_genid == st->genid)
 				return r;
 			r = rcu_dereference(r->u.dst.rt_next);
 		}
@@ -294,7 +296,8 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
 	return r;
 }
 
-static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r)
+static struct rtable *__rt_cache_get_next(struct rt_cache_iter_state *st,
+					  struct rtable *r)
 {
 	r = r->u.dst.rt_next;
 	while (!r) {
@@ -307,16 +310,25 @@ static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct r
 	return rcu_dereference(r);
 }
 
+static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st,
+					struct rtable *r)
+{
+	while ((r = __rt_cache_get_next(st, r)) != NULL) {
+		if (r->u.dst.dev->nd_net != st->p.net)
+			continue;
+		if (r->rt_genid == st->genid)
+			break;
+	}
+	return r;
+}
+
 static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos)
 {
 	struct rtable *r = rt_cache_get_first(st);
 
 	if (r)
-		while (pos && (r = rt_cache_get_next(st, r))) {
-			if (r->rt_genid != st->genid)
-				continue;
+		while (pos && (r = rt_cache_get_next(st, r)))
 			--pos;
-		}
 	return pos ? NULL : r;
 }
 
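
The hunk above splits the iterator: __rt_cache_get_next() does the raw hash-chain walk, while the new rt_cache_get_next() filters on both the owning namespace and the cache generation, so rt_cache_get_idx() no longer re-checks genid itself. The seq_file callbacks that drive these helpers sit outside this hunk; the sketch below is only an approximation of how they are expected to use them, not code from this patch.

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;

	st->genid = atomic_read(&rt_genid);	/* snapshot the cache generation */
	if (*pos)
		return rt_cache_get_idx(st, *pos - 1);
	return SEQ_START_TOKEN;			/* lets ->show print a header line */
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(st);
	else
		r = rt_cache_get_next(st, v);
	++*pos;
	return r;
}
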
@@ -390,7 +402,7 @@ static const struct seq_operations rt_cache_seq_ops = {
 
 static int rt_cache_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open_private(file, &rt_cache_seq_ops,
+	return seq_open_net(inode, file, &rt_cache_seq_ops,
 			sizeof(struct rt_cache_iter_state));
 }
 
@@ -399,7 +411,7 @@ static const struct file_operations rt_cache_seq_fops = {
 	.open	 = rt_cache_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
-	.release = seq_release_private,
+	.release = seq_release_net,
 };
 
 
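
Switching to seq_open_net()/seq_release_net() is what makes st->p.net valid in the lookups above: seq_open_net() allocates the per-open state, requires it to begin with a struct seq_net_private, and records the network namespace taken from the proc inode in that header; seq_release_net() drops that reference again. A minimal sketch of the pattern with placeholder names (everything prefixed foo_ is hypothetical, only the seq_file helpers and struct seq_net_private are real API):

struct foo_iter_state {
	struct seq_net_private p;	/* must be first: seq_open_net() fills p.net */
	int bucket;
};

static const struct seq_operations foo_seq_ops;	/* .start/.next/.stop/.show defined elsewhere */

static int foo_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &foo_seq_ops,
			    sizeof(struct foo_iter_state));
}

static const struct file_operations foo_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = foo_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,	/* drops the namespace ref taken at open */
};
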
@@ -533,7 +545,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
 }
 #endif
 
-static __init int ip_rt_proc_init(struct net *net)
+static int __net_init ip_rt_do_proc_init(struct net *net)
 {
 	struct proc_dir_entry *pde;
 
@@ -564,8 +576,26 @@ err2:
 err1:
 	return -ENOMEM;
 }
+
+static void __net_exit ip_rt_do_proc_exit(struct net *net)
+{
+	remove_proc_entry("rt_cache", net->proc_net_stat);
+	remove_proc_entry("rt_cache", net->proc_net);
+	remove_proc_entry("rt_acct", net->proc_net);
+}
+
+static struct pernet_operations ip_rt_proc_ops __net_initdata = {
+	.init = ip_rt_do_proc_init,
+	.exit = ip_rt_do_proc_exit,
+};
+
+static int __init ip_rt_proc_init(void)
+{
+	return register_pernet_subsys(&ip_rt_proc_ops);
+}
+
 #else
-static inline int ip_rt_proc_init(struct net *net)
+static inline int ip_rt_proc_init(void)
 {
 	return 0;
 }
@@ -1131,10 +1161,12 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 	__be32 skeys[2] = { saddr, 0 };
 	int ikeys[2] = { dev->ifindex, 0 };
 	struct netevent_redirect netevent;
+	struct net *net;
 
 	if (!in_dev)
 		return;
 
+	net = dev->nd_net;
 	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
 	    || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
 	    || ipv4_is_zeronet(new_gw))
@@ -1146,7 +1178,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
 			goto reject_redirect;
 	} else {
-		if (inet_addr_type(&init_net, new_gw) != RTN_UNICAST)
+		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
 			goto reject_redirect;
 	}
 
@@ -1164,7 +1196,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 			    rth->fl.fl4_src != skeys[i] ||
 			    rth->fl.oif != ikeys[k] ||
 			    rth->fl.iif != 0 ||
-			    rth->rt_genid != atomic_read(&rt_genid)) {
+			    rth->rt_genid != atomic_read(&rt_genid) ||
+			    rth->u.dst.dev->nd_net != net) {
 				rthp = &rth->u.dst.rt_next;
 				continue;
 			}
@@ -1256,7 +1289,7 @@ reject_redirect:
 
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 {
-	struct rtable *rt = (struct rtable*)dst;
+	struct rtable *rt = (struct rtable *)dst;
 	struct dst_entry *ret = dst;
 
 	if (rt) {
@@ -1297,7 +1330,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 
 void ip_rt_send_redirect(struct sk_buff *skb)
 {
-	struct rtable *rt = (struct rtable*)skb->dst;
+	struct rtable *rt = skb->rtable;
 	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
 
 	if (!in_dev)
@@ -1346,7 +1379,7 @@ out:
 
 static int ip_error(struct sk_buff *skb)
 {
-	struct rtable *rt = (struct rtable*)skb->dst;
+	struct rtable *rt = skb->rtable;
 	unsigned long now;
 	int code;
 
@@ -1515,7 +1548,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
 
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
-	rt = (struct rtable *) skb->dst;
+	rt = skb->rtable;
 	if (rt)
 		dst_set_expires(&rt->u.dst, 0);
 }
@@ -1675,7 +1708,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	in_dev_put(in_dev);
 	hash = rt_hash(daddr, saddr, dev->ifindex);
-	return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
+	return rt_intern_hash(hash, rth, &skb->rtable);
 
 e_nobufs:
 	in_dev_put(in_dev);
@@ -1836,7 +1869,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
 
 	/* put it into the cache */
 	hash = rt_hash(daddr, saddr, fl->iif);
-	return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
+	return rt_intern_hash(hash, rth, &skb->rtable);
 }
 
 /*
@@ -1992,7 +2025,7 @@ local_input:
 	}
 	rth->rt_type	= res.type;
 	hash = rt_hash(daddr, saddr, fl.iif);
-	err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
+	err = rt_intern_hash(hash, rth, &skb->rtable);
 	goto done;
 
 no_route:
@@ -2058,7 +2091,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			dst_use(&rth->u.dst, jiffies);
 			RT_CACHE_STAT_INC(in_hit);
 			rcu_read_unlock();
-			skb->dst = (struct dst_entry*)rth;
+			skb->rtable = rth;
 			return 0;
 		}
 		RT_CACHE_STAT_INC(in_hlist_search);
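
The remaining hunks replace the (struct rtable *)skb->dst casts with direct use of skb->rtable. In this tree, rtable appears to share storage with dst in struct sk_buff (a union), so the assignment is equivalent to the old cast while keeping the call sites typed; treat the layout below as an assumption, since sk_buff itself is outside this diff.

	/* Assumed sk_buff fragment (not part of this patch): */
	union {
		struct dst_entry	*dst;
		struct rtable		*rtable;
	};

	/* which lets the cache-hit path store the typed pointer directly: */
	skb->rtable = rth;	/* previously: skb->dst = (struct dst_entry *)rth; */
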
@@ -2565,7 +2598,7 @@ int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 			int nowait, unsigned int flags)
 {
-	struct rtable *rt = (struct rtable*)skb->dst;
+	struct rtable *rt = skb->rtable;
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
 	long expires;
@@ -2668,9 +2701,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 	int err;
 	struct sk_buff *skb;
 
-	if (net != &init_net)
-		return -EINVAL;
-
 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
 	if (err < 0)
 		goto errout;
@@ -2700,7 +2730,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 	if (iif) {
 		struct net_device *dev;
 
-		dev = __dev_get_by_index(&init_net, iif);
+		dev = __dev_get_by_index(net, iif);
 		if (dev == NULL) {
 			err = -ENODEV;
 			goto errout_free;
@@ -2712,7 +2742,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
 		local_bh_enable();
 
-		rt = (struct rtable*) skb->dst;
+		rt = skb->rtable;
 		if (err == 0 && rt->u.dst.error)
 			err = -rt->u.dst.error;
 	} else {
@@ -2726,22 +2756,22 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 			},
 			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
 		};
-		err = ip_route_output_key(&init_net, &rt, &fl);
+		err = ip_route_output_key(net, &rt, &fl);
 	}
 
 	if (err)
 		goto errout_free;
 
-	skb->dst = &rt->u.dst;
+	skb->rtable = rt;
 	if (rtm->rtm_flags & RTM_F_NOTIFY)
 		rt->rt_flags |= RTCF_NOTIFY;
 
 	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
 			   RTM_NEWROUTE, 0, 0);
 	if (err <= 0)
 		goto errout_free;
 
-	err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
+	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
 errout:
 	return err;
 
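
Dropping the init_net guard makes RTM_GETROUTE usable from any namespace: the iif lookup, the route resolution and the reply unicast above all operate on the local net instead of init_net. The variable itself is declared earlier in inet_rtm_getroute(), outside this diff; the line below is an assumption about how it is derived there (from the socket the netlink request arrived on), shown only for context.

	/* Assumed context from earlier in inet_rtm_getroute(), outside this hunk: */
	struct net *net = in_skb->sk->sk_net;
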
@@ -2755,6 +2785,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct rtable *rt;
 	int h, s_h;
 	int idx, s_idx;
+	struct net *net;
+
+	net = skb->sk->sk_net;
 
 	s_h = cb->args[0];
 	if (s_h < 0)
@@ -2764,7 +2797,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
 		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
-			if (idx < s_idx)
+			if (rt->u.dst.dev->nd_net != net || idx < s_idx)
 				continue;
 			if (rt->rt_genid != atomic_read(&rt_genid))
 				continue;
@@ -3040,7 +3073,7 @@ int __init ip_rt_init(void)
 					ip_rt_secret_interval;
 	add_timer(&rt_secret_timer);
 
-	if (ip_rt_proc_init(&init_net))
+	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
 	xfrm_init();