Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	164
1 file changed, 82 insertions, 82 deletions
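Every hunk below that changes visible code is the same mechanical rename: rt->u.rt_next becomes rt->u.dst.rt_next, so the routing-cache hash chain is now walked through the embedded struct dst_entry rather than through a separate union member. The remaining hunks, whose old and new lines render identically, appear to be whitespace-only cleanups. As a rough sketch of the layout the rename implies — reconstructed from the identifiers visible in this diff, not from the real headers, so treat the struct body as an illustrative assumption:

	/* Sketch only, assuming the layout implied by identifiers in this
	 * diff.  The chain link used to be a sibling of dst inside the
	 * union (rt->u.rt_next); it is now reached through the dst_entry
	 * itself (rt->u.dst.rt_next). */
	struct rtable {
		union {
			struct dst_entry dst;	/* dst.rt_next links the hash chain */
		} u;
		struct flowi fl;		/* lookup key compared by the chain walks */
		/* ... remaining routing-cache fields ... */
	};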
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index baee304a3cb7..5b3834b38a2d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -20,7 +20,7 @@
 * (rco@di.uminho.pt) Routing table insertion and update
 * Linus Torvalds : Rewrote bits to be sensible
 * Alan Cox : Added BSD route gw semantics
-* Alan Cox : Super /proc >4K 
+* Alan Cox : Super /proc >4K
 * Alan Cox : MTU in route table
 * Alan Cox : MSS actually. Also added the window
 * clamper.
@@ -38,7 +38,7 @@
 * Alan Cox : Faster /proc handling
 * Alexey Kuznetsov : Massive rework to support tree based routing,
 * routing caches and better behaviour.
-* 
+*
 * Olaf Erb : irtt wasn't being copied right.
 * Bjorn Ekwall : Kerneld route support.
 * Alan Cox : Multicast fixed (I hope)
@@ -289,7 +289,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
 {
 	struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-	r = r->u.rt_next;
+	r = r->u.dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		if (--st->bucket < 0)
@@ -361,8 +361,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 			   dev_queue_xmit) : 0,
 			r->rt_spec_dst);
 		seq_printf(seq, "%-127s\n", temp);
-	} 
-	return 0; 
+	}
+	return 0;
 }
 
 static struct seq_operations rt_cache_seq_ops = {
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		return &per_cpu(rt_cache_stat, cpu);
 	}
 	return NULL;
- 
+
 }
 
 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
@@ -445,7 +445,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
 		return 0;
 	}
- 
+
 	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
 		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
 		   atomic_read(&ipv4_dst_ops.entries),
@@ -459,7 +459,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 
 		   st->out_hit,
 		   st->out_slow_tot,
-		   st->out_slow_mc, 
+		   st->out_slow_mc,
 
 		   st->gc_total,
 		   st->gc_ignored,
@@ -493,7 +493,7 @@ static struct file_operations rt_cpu_seq_fops = {
 };
 
 #endif /* CONFIG_PROC_FS */
- 
+
 static __inline__ void rt_free(struct rtable *rt)
 {
 	multipath_remove(rt);
@@ -512,7 +512,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
 	/* Kill broadcast/multicast entries very aggresively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.rt_next;
+		rth->fl.iif && rth->u.dst.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +595,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 		if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
 		    compare_keys(&(*rthp)->fl, &expentry->fl)) {
 			if (*rthp == expentry) {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				continue;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 				if (removed_count)
 					++(*removed_count);
@@ -606,9 +606,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 		} else {
 			if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
 			    passedexpired && !nextstep)
-				nextstep = &rth->u.rt_next;
+				nextstep = &rth->u.dst.rt_next;
 
-			rthp = &rth->u.rt_next;
+			rthp = &rth->u.dst.rt_next;
 		}
 	}
 
@@ -649,12 +649,12 @@ static void rt_check_expire(unsigned long dummy)
 			/* Entry is expired even if it is in use */
 			if (time_before_eq(now, rth->u.dst.expires)) {
 				tmo >>= 1;
-				rthp = &rth->u.rt_next;
+				rthp = &rth->u.dst.rt_next;
 				continue;
 			}
 		} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
 			tmo >>= 1;
-			rthp = &rth->u.rt_next;
+			rthp = &rth->u.dst.rt_next;
 			continue;
 		}
 
@@ -668,12 +668,12 @@ static void rt_check_expire(unsigned long dummy)
 			if (!rthp)
 				break;
 		} else {
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			rt_free(rth);
 		}
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-		*rthp = rth->u.rt_next;
+		*rthp = rth->u.dst.rt_next;
 		rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
 		}
 		spin_unlock(rt_hash_lock_addr(i));
@@ -706,7 +706,7 @@ static void rt_run_flush(unsigned long dummy)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth; rth = next) {
-			next = rth->u.rt_next;
+			next = rth->u.dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -739,7 +739,7 @@ void rt_cache_flush(int delay)
 
 		if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
 			tmo = 0;
- 
+
 		if (delay > tmo)
 			delay = tmo;
 	}
@@ -840,7 +840,7 @@ static int rt_garbage_collect(void)
 		while ((rth = *rthp) != NULL) {
 			if (!rt_may_expire(rth, tmo, expire)) {
 				tmo >>= 1;
-				rthp = &rth->u.rt_next;
+				rthp = &rth->u.dst.rt_next;
 				continue;
 			}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +858,12 @@ static int rt_garbage_collect(void)
 				if (!rthp)
 					break;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 				goal--;
 			}
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			rt_free(rth);
 			goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +947,13 @@ restart:
 		if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
 			/* Put it first */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.rt_next,
+			rcu_assign_pointer(rth->u.dst.rt_next,
 					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
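(The move-to-front step in the hunk above reduces to the pattern below; this is a minimal sketch of the ordering the comments describe, not the verbatim kernel code.)

	/* Unlink rth from its current position, then republish it at the
	 * head of the chain.  rcu_assign_pointer() supplies the write
	 * barrier, so a lockless reader on a weakly ordered CPU never
	 * sees the entry linked in two places at once. */
	*rthp = rth->u.dst.rt_next;			/* delete */
	rcu_assign_pointer(rth->u.dst.rt_next,		/* point at old head */
			   rt_hash_table[hash].chain);
	rcu_assign_pointer(rt_hash_table[hash].chain, rth);	/* publish */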
@@ -983,7 +983,7 @@ restart:
 
 		chain_length++;
 
-		rthp = &rth->u.rt_next;
+		rthp = &rth->u.dst.rt_next;
 	}
 
 	if (cand) {
@@ -994,7 +994,7 @@ restart:
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.rt_next;
+			*candp = cand->u.dst.rt_next;
 			rt_free(cand);
 		}
 	}
@@ -1034,13 +1034,13 @@ restart:
 		}
 	}
 
-	rt->u.rt_next = rt_hash_table[hash].chain;
+	rt->u.dst.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.rt_next) {
+	if (rt->u.dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
 		       NIPQUAD(rt->rt_dst));
-		for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
 			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
 		printk("\n");
 	}
@@ -1104,7 +1104,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 			return;
 		}
 	} else
-		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", 
+		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
 		       __builtin_return_address(0));
 
 	ip_select_fb_ident(iph);
@@ -1117,9 +1117,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	ip_rt_put(rt);
 	for (rthp = &rt_hash_table[hash].chain; *rthp;
-	     rthp = &(*rthp)->u.rt_next)
+	     rthp = &(*rthp)->u.dst.rt_next)
 		if (*rthp == rt) {
-			*rthp = rt->u.rt_next;
+			*rthp = rt->u.dst.rt_next;
 			rt_free(rt);
 			break;
 		}
@@ -1167,7 +1167,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		    rth->fl.fl4_src != skeys[i] ||
 		    rth->fl.oif != ikeys[k] ||
 		    rth->fl.iif != 0) {
-			rthp = &rth->u.rt_next;
+			rthp = &rth->u.dst.rt_next;
 			continue;
 		}
 
@@ -1190,7 +1190,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 			/* Copy all the information. */
 			*rt = *rth;
-			INIT_RCU_HEAD(&rt->u.dst.rcu_head); 
+			INIT_RCU_HEAD(&rt->u.dst.rcu_head);
 			rt->u.dst.__use = 1;
 			atomic_set(&rt->u.dst.__refcnt, 1);
 			rt->u.dst.child = NULL;
@@ -1225,11 +1225,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				rt_drop(rt);
 				goto do_next;
 			}
-
-			netevent.old = &rth->u.dst;
-			netevent.new = &rt->u.dst;
-			call_netevent_notifiers(NETEVENT_REDIRECT,
-			                        &netevent);
+
+			netevent.old = &rth->u.dst;
+			netevent.new = &rt->u.dst;
+			call_netevent_notifiers(NETEVENT_REDIRECT,
+						&netevent);
 
 			rt_del(hash, rth);
 			if (!rt_intern_hash(hash, rt, &rt))
@@ -1343,7 +1343,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 #endif
 	}
 out:
-	in_dev_put(in_dev); 
+	in_dev_put(in_dev);
 }
 
 static int ip_error(struct sk_buff *skb)
@@ -1379,7 +1379,7 @@ static int ip_error(struct sk_buff *skb)
 
 out:	kfree_skb(skb);
 	return 0;
-} 
+}
 
 /*
  * The last two values are not from the RFC but
@@ -1392,7 +1392,7 @@ static const unsigned short mtu_plateau[] =
 static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
 {
 	int i;
- 
+
 	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
 		if (old_mtu > mtu_plateau[i])
 			return mtu_plateau[i];
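(guess_mtu() above walks a descending plateau table and returns the first entry strictly below the old MTU — the RFC 1191 strategy for Path MTU discovery when a router reports no next-hop MTU. The values below are shown as an illustrative assumption; the actual mtu_plateau[] contents sit outside this hunk, and the file's own comment notes its last entries are not from the RFC.)

	/* Illustrative plateau table; see RFC 1191, section 7.1. */
	static const unsigned short mtu_plateau[] =
		{ 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

	/* With this table, guess_mtu(1500) returns 1492 and
	 * guess_mtu(576) returns 296. */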
@@ -1416,7 +1416,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == daddr &&
 		    rth->fl.fl4_src == skeys[i] &&
 		    rth->rt_dst == daddr &&
@@ -1436,7 +1436,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 			mtu = guess_mtu(old_mtu);
 		}
 		if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
-			if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { 
+			if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
 				dst_confirm(&rth->u.dst);
 				if (mtu < ip_rt_min_pmtu) {
 					mtu = ip_rt_min_pmtu;
@@ -1600,7 +1600,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 #endif
 	set_class_tag(rt, itag);
 #endif
-	rt->rt_type = res->type; 
+	rt->rt_type = res->type;
 }
 
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -1714,11 +1714,11 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
-static inline int __mkroute_input(struct sk_buff *skb, 
-				  struct fib_result* res, 
-				  struct in_device *in_dev, 
-				  __be32 daddr, __be32 saddr, u32 tos, 
-				  struct rtable **result) 
+static inline int __mkroute_input(struct sk_buff *skb,
+				  struct fib_result* res,
+				  struct in_device *in_dev,
+				  __be32 daddr, __be32 saddr, u32 tos,
+				  struct rtable **result)
 {
 
 	struct rtable *rth;
@@ -1738,12 +1738,12 @@ static inline int __mkroute_input(struct sk_buff *skb,
 	}
 
 
-	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 
-				  in_dev->dev, &spec_dst, &itag); 
-	if (err < 0) { 
-		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 
-					 saddr); 
- 
+	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
+				  in_dev->dev, &spec_dst, &itag);
+	if (err < 0) {
+		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
+					 saddr);
+
 		err = -EINVAL;
 		goto cleanup;
 	}
@@ -1811,10 +1811,10 @@ static inline int __mkroute_input(struct sk_buff *skb,
 	/* release the working reference to the output device */
 	in_dev_put(out_dev);
 	return err;
-} 
-
-static inline int ip_mkroute_input_def(struct sk_buff *skb, 
-					struct fib_result* res, 
+}
+
+static inline int ip_mkroute_input_def(struct sk_buff *skb,
+					struct fib_result* res,
 					const struct flowi *fl,
 					struct in_device *in_dev,
 					__be32 daddr, __be32 saddr, u32 tos)
@@ -1835,11 +1835,11 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
 
 	/* put it into the cache */
 	hash = rt_hash(daddr, saddr, fl->iif);
-	return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 
-} 
-
-static inline int ip_mkroute_input(struct sk_buff *skb, 
-				   struct fib_result* res, 
+	return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
+}
+
+static inline int ip_mkroute_input(struct sk_buff *skb,
+				   struct fib_result* res,
 				   const struct flowi *fl,
 				   struct in_device *in_dev,
 				   __be32 daddr, __be32 saddr, u32 tos)
@@ -1859,7 +1859,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
 	if (hopcount < 2)
 		return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
 					    saddr, tos);
- 
+
 	/* add all alternatives to the routing cache */
 	for (hop = 0; hop < hopcount; hop++) {
 		res->nh_sel = hop;
@@ -1988,7 +1988,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		goto e_nobufs;
 	if (err == -EINVAL)
 		goto e_inval;
- 
+
 done:
 	in_dev_put(in_dev);
 	if (free_res)
@@ -2071,8 +2071,8 @@ martian_destination:
 #endif
 
 e_hostunreach:
-	err = -EHOSTUNREACH; 
-	goto done; 
+	err = -EHOSTUNREACH;
+	goto done;
 
 e_inval:
 	err = -EINVAL;
@@ -2099,7 +2099,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == daddr &&
 		    rth->fl.fl4_src == saddr &&
 		    rth->fl.iif == iif &&
@@ -2153,11 +2153,11 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 }
 
 static inline int __mkroute_output(struct rtable **result,
-				   struct fib_result* res, 
-				   const struct flowi *fl, 
-				   const struct flowi *oldflp, 
-				   struct net_device *dev_out, 
-				   unsigned flags) 
+				   struct fib_result* res,
+				   const struct flowi *fl,
+				   const struct flowi *oldflp,
+				   struct net_device *dev_out,
+				   unsigned flags)
 {
 	struct rtable *rth;
 	struct in_device *in_dev;
@@ -2190,7 +2190,7 @@ static inline int __mkroute_output(struct rtable **result,
 		}
 	} else if (res->type == RTN_MULTICAST) {
 		flags |= RTCF_MULTICAST|RTCF_LOCAL;
-		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src, 
+		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
 				 oldflp->proto))
 			flags &= ~RTCF_LOCAL;
 		/* If multicast route do not exist use
@@ -2208,7 +2208,7 @@ static inline int __mkroute_output(struct rtable **result,
 	if (!rth) {
 		err = -ENOBUFS;
 		goto cleanup;
-	} 
+	}
 
 	atomic_set(&rth->u.dst.__refcnt, 1);
 	rth->u.dst.flags= DST_HOST;
@@ -2232,7 +2232,7 @@ static inline int __mkroute_output(struct rtable **result,
 	rth->rt_dst = fl->fl4_dst;
 	rth->rt_src = fl->fl4_src;
 	rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
-	/* get references to the devices that are to be hold by the routing 
+	/* get references to the devices that are to be hold by the routing
 	   cache entry */
 	rth->u.dst.dev = dev_out;
 	dev_hold(dev_out);
@@ -2250,7 +2250,7 @@ static inline int __mkroute_output(struct rtable **result,
 	}
 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
 		rth->rt_spec_dst = fl->fl4_src;
-		if (flags & RTCF_LOCAL && 
+		if (flags & RTCF_LOCAL &&
 		    !(dev_out->flags & IFF_LOOPBACK)) {
 			rth->u.dst.output = ip_mc_output;
 			RT_CACHE_STAT_INC(out_slow_mc);
@@ -2292,7 +2292,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
 		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
 		err = rt_intern_hash(hash, rth, rp);
 	}
- 
+
 	return err;
 }
 
@@ -2563,7 +2563,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
@@ -2825,12 +2825,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			s_idx = 0;
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference(rt->u.rt_next), idx++) {
+		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb->dst = dst_clone(&rt->u.dst);
 			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
-					 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 
+					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 					 1, NLM_F_MULTI) <= 0) {
 				dst_release(xchg(&skb->dst, NULL));
 				rcu_read_unlock_bh();
@@ -2863,7 +2863,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
 		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
 		rt_cache_flush(flush_delay);
 		return 0;
-	} 
+	}
 
 	return -EINVAL;
 }
@@ -2880,13 +2880,13 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
 	if (newlen != sizeof(int))
 		return -EINVAL;
 	if (get_user(delay, (int __user *)newval))
-		return -EFAULT; 
+		return -EFAULT;
 	rt_cache_flush(delay);
 	return 0;
 }
 
 ctl_table ipv4_route_table[] = {
-	{ 
+	{
 		.ctl_name = NET_IPV4_ROUTE_FLUSH,
 		.procname = "flush",
 		.data = &flush_delay,
@@ -2931,7 +2931,7 @@ ctl_table ipv4_route_table[] = {
 	},
 	{
 		/* Deprecated. Use gc_min_interval_ms */
- 
+
 		.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
 		.procname = "gc_min_interval",
 		.data = &ip_rt_gc_min_interval,
@@ -3180,8 +3180,8 @@ int __init ip_rt_init(void)
 	{
 	struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
 	if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
-	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 
-					     proc_net_stat))) { 
+	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
+					     proc_net_stat))) {
 		return -ENOMEM;
 	}
 	rtstat_pde->proc_fops = &rt_cpu_seq_fops;
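The read-side pattern that most of these hunks touch is the lockless hash-chain walk. A simplified sketch of how a cache lookup reads after the rename — identifiers taken from the hunks above, with locals, the full key comparison, and statistics omitted, so this is illustrative rather than verbatim:

	rcu_read_lock();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == daddr && rth->fl.fl4_src == saddr) {
			/* Cache hit: take a reference while still inside
			 * the RCU read-side critical section. */
			dst_hold(&rth->u.dst);
			break;
		}
	}
	rcu_read_unlock();

Writers only unlink entries or publish them with rcu_assign_pointer() while holding the per-bucket hash lock, so a reader traversing u.dst.rt_next under rcu_read_lock() always sees a consistent chain — which is why every traversal site in the file changes in lockstep.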