author	Eric Dumazet <dada1@cosmosbay.com>	2007-02-09 19:19:26 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-02-11 02:20:38 -0500
commit	093c2ca4167cf66f69020329d14138da0da8599b (patch)
tree	7bae584ac848923867037324197068c869b62a46 /net
parent	75ce7ceaa1221858c0163e75d19eb8a423a212ff (diff)
[IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer
This patch removes the rt_next pointer from the 'struct rtable.u' union and renames u.rt_next to u.dst.rt_next. It also moves 'struct flowi' to sit right after 'struct dst_entry', to prepare the gain on lookups.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
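For orientation, here is a simplified before/after sketch of the layout change the commit message describes. It is not the verbatim header diff; field lists are abridged and only the members relevant to this patch are shown.

	/* Before: the hash-chain link lived in rtable's own union. */
	struct rtable {
		union {
			struct dst_entry	dst;
			struct rtable		*rt_next;	/* removed by this patch */
		} u;
		/* ... other members, with struct flowi fl further down ... */
	};

	/* After: the chain link comes from dst_entry's 'next' pointer
	 * (introduced by the parent commit), so chain walks now use
	 * rt->u.dst.rt_next, and the lookup keys (struct flowi fl) sit
	 * directly after the union to improve cache locality on lookups.
	 */
	struct rtable {
		union {
			struct dst_entry	dst;
		} u;
		struct flowi		fl;	/* cache lookup keys, moved up */
		/* ... other members ... */
	};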
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/route.c	56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 56d6602affb4..5b3834b38a2d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -289,7 +289,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
 {
 	struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-	r = r->u.rt_next;
+	r = r->u.dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		if (--st->bucket < 0)
@@ -512,7 +512,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
 	/* Kill broadcast/multicast entries very aggresively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.rt_next;
+		rth->fl.iif && rth->u.dst.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +595,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 		if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
 		    compare_keys(&(*rthp)->fl, &expentry->fl)) {
 			if (*rthp == expentry) {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				continue;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 				if (removed_count)
 					++(*removed_count);
@@ -606,9 +606,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 		} else {
 			if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
 			    passedexpired && !nextstep)
-				nextstep = &rth->u.rt_next;
+				nextstep = &rth->u.dst.rt_next;
 
-			rthp = &rth->u.rt_next;
+			rthp = &rth->u.dst.rt_next;
 		}
 	}
 
@@ -649,12 +649,12 @@ static void rt_check_expire(unsigned long dummy)
 				/* Entry is expired even if it is in use */
 				if (time_before_eq(now, rth->u.dst.expires)) {
 					tmo >>= 1;
-					rthp = &rth->u.rt_next;
+					rthp = &rth->u.dst.rt_next;
 					continue;
 				}
 			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
 				tmo >>= 1;
-				rthp = &rth->u.rt_next;
+				rthp = &rth->u.dst.rt_next;
 				continue;
 			}
 
@@ -668,11 +668,11 @@ static void rt_check_expire(unsigned long dummy)
 				if (!rthp)
 					break;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 			}
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
 		}
@@ -706,7 +706,7 @@ static void rt_run_flush(unsigned long dummy)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth; rth = next) {
-			next = rth->u.rt_next;
+			next = rth->u.dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -840,7 +840,7 @@ static int rt_garbage_collect(void)
 			while ((rth = *rthp) != NULL) {
 				if (!rt_may_expire(rth, tmo, expire)) {
 					tmo >>= 1;
-					rthp = &rth->u.rt_next;
+					rthp = &rth->u.dst.rt_next;
 					continue;
 				}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +858,12 @@ static int rt_garbage_collect(void)
 					if (!rthp)
 						break;
 				} else {
-					*rthp = rth->u.rt_next;
+					*rthp = rth->u.dst.rt_next;
 					rt_free(rth);
 					goal--;
 				}
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 				goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +947,13 @@ restart:
 		if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
 			/* Put it first */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.rt_next,
+			rcu_assign_pointer(rth->u.dst.rt_next,
 					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
@@ -983,7 +983,7 @@ restart:
 
 		chain_length++;
 
-		rthp = &rth->u.rt_next;
+		rthp = &rth->u.dst.rt_next;
 	}
 
 	if (cand) {
@@ -994,7 +994,7 @@ restart:
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.rt_next;
+			*candp = cand->u.dst.rt_next;
 			rt_free(cand);
 		}
 	}
@@ -1034,13 +1034,13 @@ restart:
 		}
 	}
 
-	rt->u.rt_next = rt_hash_table[hash].chain;
+	rt->u.dst.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.rt_next) {
+	if (rt->u.dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
 		       NIPQUAD(rt->rt_dst));
-		for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
 			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
 		printk("\n");
 	}
@@ -1117,9 +1117,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	ip_rt_put(rt);
 	for (rthp = &rt_hash_table[hash].chain; *rthp;
-	     rthp = &(*rthp)->u.rt_next)
+	     rthp = &(*rthp)->u.dst.rt_next)
 		if (*rthp == rt) {
-			*rthp = rt->u.rt_next;
+			*rthp = rt->u.dst.rt_next;
 			rt_free(rt);
 			break;
 		}
@@ -1167,7 +1167,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				    rth->fl.fl4_src != skeys[i] ||
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0) {
-					rthp = &rth->u.rt_next;
+					rthp = &rth->u.dst.rt_next;
 					continue;
 				}
 
@@ -1416,7 +1416,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 
 		rcu_read_lock();
 		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-		     rth = rcu_dereference(rth->u.rt_next)) {
+		     rth = rcu_dereference(rth->u.dst.rt_next)) {
 			if (rth->fl.fl4_dst == daddr &&
 			    rth->fl.fl4_src == skeys[i] &&
 			    rth->rt_dst == daddr &&
@@ -2099,7 +2099,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == daddr &&
 		    rth->fl.fl4_src == saddr &&
 		    rth->fl.iif == iif &&
@@ -2563,7 +2563,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
@@ -2825,7 +2825,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			s_idx = 0;
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference(rt->u.rt_next), idx++) {
+		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb->dst = dst_clone(&rt->u.dst);