Diffstat (limited to 'net/ipv4/route.c')

 net/ipv4/route.c | 420 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 210 insertions(+), 210 deletions(-)
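Every hunk below is the same mechanical rename: each access spelled rt->u.dst.<field> becomes rt->dst.<field>. As a reading aid, here is a minimal sketch of the struct rtable change in include/net/route.h that such a rename implies; the _old/_new suffixes and the elided fields are illustrative only, not part of this diff. Evidently the embedded dst_entry had been wrapped in a single-member union named u, and dropping that wrapper shortens every access by one level:

/* Before (sketch): dst_entry wrapped in a one-member union,
 * so every access reads rt->u.dst.<field>. */
struct rtable_old {
	union {
		struct dst_entry	dst;
	} u;
	/* ... remaining rtable fields (flowi, rt_flags, rt_genid, ...) ... */
};

/* After (sketch): dst_entry embedded directly,
 * so the same accesses become rt->dst.<field>. */
struct rtable_new {
	struct dst_entry	dst;
	/* ... remaining fields unchanged ... */
};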
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 883b5c7195ac..a291edbbc97f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -286,10 +286,10 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 		while (r) {
-			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
+			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
 			    r->rt_genid == st->genid)
 				return r;
-			r = rcu_dereference_bh(r->u.dst.rt_next);
+			r = rcu_dereference_bh(r->dst.rt_next);
 		}
 		rcu_read_unlock_bh();
 	}
@@ -301,7 +301,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 
-	r = r->u.dst.rt_next;
+	r = r->dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		do {
@@ -319,7 +319,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
-		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
+		if (dev_net(r->dst.dev) != seq_file_net(seq))
 			continue;
 		if (r->rt_genid == st->genid)
 			break;
@@ -377,19 +377,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 
 		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
-			r->u.dst.dev ? r->u.dst.dev->name : "*",
+			r->dst.dev ? r->dst.dev->name : "*",
 			(__force u32)r->rt_dst,
 			(__force u32)r->rt_gateway,
-			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
-			r->u.dst.__use, 0, (__force u32)r->rt_src,
-			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
-			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
-			dst_metric(&r->u.dst, RTAX_WINDOW),
-			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
-			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
+			r->rt_flags, atomic_read(&r->dst.__refcnt),
+			r->dst.__use, 0, (__force u32)r->rt_src,
+			(dst_metric(&r->dst, RTAX_ADVMSS) ?
+			     (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+			dst_metric(&r->dst, RTAX_WINDOW),
+			(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
+			      dst_metric(&r->dst, RTAX_RTTVAR)),
 			r->fl.fl4_tos,
-			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
-			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
+			r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
+			r->dst.hh ? (r->dst.hh->hh_output ==
 				       dev_queue_xmit) : 0,
 			r->rt_spec_dst, &len);
 
@@ -608,13 +608,13 @@ static inline int ip_rt_proc_init(void)
 
 static inline void rt_free(struct rtable *rt)
 {
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline void rt_drop(struct rtable *rt)
 {
 	ip_rt_put(rt);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline int rt_fast_clean(struct rtable *rth)
@@ -622,13 +622,13 @@ static inline int rt_fast_clean(struct rtable *rth)
 	/* Kill broadcast/multicast entries very aggresively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.dst.rt_next;
+		rth->fl.iif && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
 {
 	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		rth->u.dst.expires;
+		rth->dst.expires;
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -636,15 +636,15 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
 	unsigned long age;
 	int ret = 0;
 
-	if (atomic_read(&rth->u.dst.__refcnt))
+	if (atomic_read(&rth->dst.__refcnt))
 		goto out;
 
 	ret = 1;
-	if (rth->u.dst.expires &&
-	    time_after_eq(jiffies, rth->u.dst.expires))
+	if (rth->dst.expires &&
+	    time_after_eq(jiffies, rth->dst.expires))
 		goto out;
 
-	age = jiffies - rth->u.dst.lastuse;
+	age = jiffies - rth->dst.lastuse;
 	ret = 0;
 	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
 	    (age <= tmo2 && rt_valuable(rth)))
@@ -660,7 +660,7 @@ out: return ret;
  */
 static inline u32 rt_score(struct rtable *rt)
 {
-	u32 score = jiffies - rt->u.dst.lastuse;
+	u32 score = jiffies - rt->dst.lastuse;
 
 	score = ~score & ~(3<<30);
 
@@ -700,12 +700,12 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 {
-	return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
+	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
 }
 
 static inline int rt_is_expired(struct rtable *rth)
 {
-	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 }
 
 /*
@@ -734,7 +734,7 @@ static void rt_do_flush(int process_context)
 		rth = rt_hash_table[i].chain;
 
 		/* defer releasing the head of the list after spin_unlock */
-		for (tail = rth; tail; tail = tail->u.dst.rt_next)
+		for (tail = rth; tail; tail = tail->dst.rt_next)
 			if (!rt_is_expired(tail))
 				break;
 		if (rth != tail)
@@ -743,9 +743,9 @@ static void rt_do_flush(int process_context)
 		/* call rt_free on entries after the tail requiring flush */
 		prev = &rt_hash_table[i].chain;
 		for (p = *prev; p; p = next) {
-			next = p->u.dst.rt_next;
+			next = p->dst.rt_next;
 			if (!rt_is_expired(p)) {
-				prev = &p->u.dst.rt_next;
+				prev = &p->dst.rt_next;
 			} else {
 				*prev = next;
 				rt_free(p);
@@ -760,7 +760,7 @@ static void rt_do_flush(int process_context)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth != tail; rth = next) {
-			next = rth->u.dst.rt_next;
+			next = rth->dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -791,7 +791,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	while (aux != rth) {
 		if (compare_hash_inputs(&aux->fl, &rth->fl))
 			return 0;
-		aux = aux->u.dst.rt_next;
+		aux = aux->dst.rt_next;
 	}
 	return ONE;
 }
@@ -831,18 +831,18 @@ static void rt_check_expire(void)
 		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
 		while ((rth = *rthp) != NULL) {
-			prefetch(rth->u.dst.rt_next);
+			prefetch(rth->dst.rt_next);
 			if (rt_is_expired(rth)) {
-				*rthp = rth->u.dst.rt_next;
+				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 				continue;
 			}
-			if (rth->u.dst.expires) {
+			if (rth->dst.expires) {
 				/* Entry is expired even if it is in use */
-				if (time_before_eq(jiffies, rth->u.dst.expires)) {
+				if (time_before_eq(jiffies, rth->dst.expires)) {
 nofree:
 					tmo >>= 1;
-					rthp = &rth->u.dst.rt_next;
+					rthp = &rth->dst.rt_next;
 					/*
 					 * We only count entries on
 					 * a chain with equal hash inputs once
@@ -858,7 +858,7 @@ nofree:
 				goto nofree;
 
 			/* Cleanup aged off entries. */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 		}
 		spin_unlock_bh(rt_hash_lock_addr(i));
@@ -999,10 +999,10 @@ static int rt_garbage_collect(struct dst_ops *ops)
 				if (!rt_is_expired(rth) &&
 					!rt_may_expire(rth, tmo, expire)) {
 					tmo >>= 1;
-					rthp = &rth->u.dst.rt_next;
+					rthp = &rth->dst.rt_next;
 					continue;
 				}
-				*rthp = rth->u.dst.rt_next;
+				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 				goal--;
 			}
@@ -1068,7 +1068,7 @@ static int slow_chain_length(const struct rtable *head)
 
 	while (rth) {
 		length += has_noalias(head, rth);
-		rth = rth->u.dst.rt_next;
+		rth = rth->dst.rt_next;
 	}
 	return length >> FRACT_BITS;
 }
@@ -1090,7 +1090,7 @@ restart:
 	candp = NULL;
 	now = jiffies;
 
-	if (!rt_caching(dev_net(rt->u.dst.dev))) {
+	if (!rt_caching(dev_net(rt->dst.dev))) {
 		/*
 		 * If we're not caching, just tell the caller we
 		 * were successful and don't touch the route. The
@@ -1108,7 +1108,7 @@ restart:
 	 */
 
 	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
+		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			if (net_ratelimit())
 				printk(KERN_WARNING
@@ -1127,19 +1127,19 @@ restart:
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	while ((rth = *rthp) != NULL) {
 		if (rt_is_expired(rth)) {
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 			continue;
 		}
 		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
 			/* Put it first */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.dst.rt_next,
+			rcu_assign_pointer(rth->dst.rt_next,
 					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
@@ -1147,18 +1147,18 @@ restart:
 			 */
 			rcu_assign_pointer(rt_hash_table[hash].chain, rth);
 
-			dst_use(&rth->u.dst, now);
+			dst_use(&rth->dst, now);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
 			rt_drop(rt);
 			if (rp)
 				*rp = rth;
 			else
-				skb_dst_set(skb, &rth->u.dst);
+				skb_dst_set(skb, &rth->dst);
 			return 0;
 		}
 
-		if (!atomic_read(&rth->u.dst.__refcnt)) {
+		if (!atomic_read(&rth->dst.__refcnt)) {
 			u32 score = rt_score(rth);
 
 			if (score <= min_score) {
@@ -1170,7 +1170,7 @@ restart:
 
 		chain_length++;
 
-		rthp = &rth->u.dst.rt_next;
+		rthp = &rth->dst.rt_next;
 	}
 
 	if (cand) {
@@ -1181,17 +1181,17 @@ restart:
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.dst.rt_next;
+			*candp = cand->dst.rt_next;
 			rt_free(cand);
 		}
 	} else {
 		if (chain_length > rt_chain_length_max &&
 		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
-			struct net *net = dev_net(rt->u.dst.dev);
+			struct net *net = dev_net(rt->dst.dev);
 			int num = ++net->ipv4.current_rt_cache_rebuild_count;
 			if (!rt_caching(net)) {
 				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
-				       rt->u.dst.dev->name, num);
+				       rt->dst.dev->name, num);
 			}
 			rt_emergency_hash_rebuild(net);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1206,7 +1206,7 @@ restart:
 	   route or unicast forwarding path.
 	 */
 	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
+		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
@@ -1237,14 +1237,14 @@ restart:
 		}
 	}
 
-	rt->u.dst.rt_next = rt_hash_table[hash].chain;
+	rt->dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.dst.rt_next) {
+	if (rt->dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
 		       hash, &rt->rt_dst);
-		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");
 	}
@@ -1262,7 +1262,7 @@ skip_hashing:
 	if (rp)
 		*rp = rt;
 	else
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 	return 0;
 }
 
@@ -1334,11 +1334,11 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	ip_rt_put(rt);
 	while ((aux = *rthp) != NULL) {
 		if (aux == rt || rt_is_expired(aux)) {
-			*rthp = aux->u.dst.rt_next;
+			*rthp = aux->dst.rt_next;
 			rt_free(aux);
 			continue;
 		}
-		rthp = &aux->u.dst.rt_next;
+		rthp = &aux->dst.rt_next;
 	}
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
@@ -1392,19 +1392,19 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
 				    rt_is_expired(rth) ||
-				    !net_eq(dev_net(rth->u.dst.dev), net)) {
-					rthp = &rth->u.dst.rt_next;
+				    !net_eq(dev_net(rth->dst.dev), net)) {
+					rthp = &rth->dst.rt_next;
 					continue;
 				}
 
 				if (rth->rt_dst != daddr ||
 				    rth->rt_src != saddr ||
-				    rth->u.dst.error ||
+				    rth->dst.error ||
 				    rth->rt_gateway != old_gw ||
-				    rth->u.dst.dev != dev)
+				    rth->dst.dev != dev)
 					break;
 
-				dst_hold(&rth->u.dst);
+				dst_hold(&rth->dst);
 
 				rt = dst_alloc(&ipv4_dst_ops);
 				if (rt == NULL) {
@@ -1414,20 +1414,20 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 				/* Copy all the information. */
 				*rt = *rth;
-				rt->u.dst.__use = 1;
-				atomic_set(&rt->u.dst.__refcnt, 1);
-				rt->u.dst.child = NULL;
-				if (rt->u.dst.dev)
-					dev_hold(rt->u.dst.dev);
+				rt->dst.__use = 1;
+				atomic_set(&rt->dst.__refcnt, 1);
+				rt->dst.child = NULL;
+				if (rt->dst.dev)
+					dev_hold(rt->dst.dev);
 				if (rt->idev)
 					in_dev_hold(rt->idev);
-				rt->u.dst.obsolete = -1;
-				rt->u.dst.lastuse = jiffies;
-				rt->u.dst.path = &rt->u.dst;
-				rt->u.dst.neighbour = NULL;
-				rt->u.dst.hh = NULL;
+				rt->dst.obsolete = -1;
+				rt->dst.lastuse = jiffies;
+				rt->dst.path = &rt->dst;
+				rt->dst.neighbour = NULL;
+				rt->dst.hh = NULL;
 #ifdef CONFIG_XFRM
-				rt->u.dst.xfrm = NULL;
+				rt->dst.xfrm = NULL;
 #endif
 				rt->rt_genid = rt_genid(net);
 				rt->rt_flags |= RTCF_REDIRECTED;
@@ -1436,23 +1436,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				rt->rt_gateway = new_gw;
 
 				/* Redirect received -> path was valid */
-				dst_confirm(&rth->u.dst);
+				dst_confirm(&rth->dst);
 
 				if (rt->peer)
 					atomic_inc(&rt->peer->refcnt);
 
-				if (arp_bind_neighbour(&rt->u.dst) ||
-				    !(rt->u.dst.neighbour->nud_state &
+				if (arp_bind_neighbour(&rt->dst) ||
+				    !(rt->dst.neighbour->nud_state &
 					 NUD_VALID)) {
-					if (rt->u.dst.neighbour)
-						neigh_event_send(rt->u.dst.neighbour, NULL);
+					if (rt->dst.neighbour)
+						neigh_event_send(rt->dst.neighbour, NULL);
 					ip_rt_put(rth);
 					rt_drop(rt);
 					goto do_next;
 				}
 
-				netevent.old = &rth->u.dst;
-				netevent.new = &rt->u.dst;
+				netevent.old = &rth->dst;
+				netevent.new = &rt->dst;
 				call_netevent_notifiers(NETEVENT_REDIRECT,
 							&netevent);
 
@@ -1488,8 +1488,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   (rt->u.dst.expires &&
-			    time_after_eq(jiffies, rt->u.dst.expires))) {
+			   (rt->dst.expires &&
+			    time_after_eq(jiffies, rt->dst.expires))) {
 			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
 						rt->fl.oif,
 						rt_genid(dev_net(dst->dev)));
@@ -1527,7 +1527,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	int log_martians;
 
 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	in_dev = __in_dev_get_rcu(rt->dst.dev);
 	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
 		rcu_read_unlock();
 		return;
@@ -1538,30 +1538,30 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
-		rt->u.dst.rate_tokens = 0;
+	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
+		rt->dst.rate_tokens = 0;
 
 	/* Too many ignored redirects; do not send anything
-	 * set u.dst.rate_last to the last seen redirected packet.
+	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
-		rt->u.dst.rate_last = jiffies;
+	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
+		rt->dst.rate_last = jiffies;
 		return;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (rt->u.dst.rate_tokens == 0 ||
+	if (rt->dst.rate_tokens == 0 ||
 	    time_after(jiffies,
-		       (rt->u.dst.rate_last +
-			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
+		       (rt->dst.rate_last +
+			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-		rt->u.dst.rate_last = jiffies;
-		++rt->u.dst.rate_tokens;
+		rt->dst.rate_last = jiffies;
+		++rt->dst.rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
+		    rt->dst.rate_tokens == ip_rt_redirect_number &&
 		    net_ratelimit())
 			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
 				&rt->rt_src, rt->rt_iif,
@@ -1576,7 +1576,7 @@ static int ip_error(struct sk_buff *skb)
 	unsigned long now;
 	int code;
 
-	switch (rt->u.dst.error) {
+	switch (rt->dst.error) {
 		case EINVAL:
 		default:
 			goto out;
@@ -1585,7 +1585,7 @@ static int ip_error(struct sk_buff *skb)
 			break;
 		case ENETUNREACH:
 			code = ICMP_NET_UNREACH;
-			IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
+			IP_INC_STATS_BH(dev_net(rt->dst.dev),
 					IPSTATS_MIB_INNOROUTES);
 			break;
 		case EACCES:
@@ -1594,12 +1594,12 @@ static int ip_error(struct sk_buff *skb)
 	}
 
 	now = jiffies;
-	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
-	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
-		rt->u.dst.rate_tokens = ip_rt_error_burst;
-	rt->u.dst.rate_last = now;
-	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
-		rt->u.dst.rate_tokens -= ip_rt_error_cost;
+	rt->dst.rate_tokens += now - rt->dst.rate_last;
+	if (rt->dst.rate_tokens > ip_rt_error_burst)
+		rt->dst.rate_tokens = ip_rt_error_burst;
+	rt->dst.rate_last = now;
+	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
+		rt->dst.rate_tokens -= ip_rt_error_cost;
 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 	}
 
@@ -1644,7 +1644,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 			rcu_read_lock();
 			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-			     rth = rcu_dereference(rth->u.dst.rt_next)) {
+			     rth = rcu_dereference(rth->dst.rt_next)) {
 				unsigned short mtu = new_mtu;
 
 				if (rth->fl.fl4_dst != daddr ||
@@ -1653,8 +1653,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 				    rth->rt_src != iph->saddr ||
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
-				    dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
-				    !net_eq(dev_net(rth->u.dst.dev), net) ||
+				    dst_metric_locked(&rth->dst, RTAX_MTU) ||
+				    !net_eq(dev_net(rth->dst.dev), net) ||
 				    rt_is_expired(rth))
 					continue;
 
@@ -1662,22 +1662,22 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 					/* BSD 4.2 compatibility hack :-( */
 					if (mtu == 0 &&
-					    old_mtu >= dst_mtu(&rth->u.dst) &&
+					    old_mtu >= dst_mtu(&rth->dst) &&
 					    old_mtu >= 68 + (iph->ihl << 2))
 						old_mtu -= iph->ihl << 2;
 
 					mtu = guess_mtu(old_mtu);
 				}
-				if (mtu <= dst_mtu(&rth->u.dst)) {
-					if (mtu < dst_mtu(&rth->u.dst)) {
-						dst_confirm(&rth->u.dst);
+				if (mtu <= dst_mtu(&rth->dst)) {
+					if (mtu < dst_mtu(&rth->dst)) {
+						dst_confirm(&rth->dst);
 						if (mtu < ip_rt_min_pmtu) {
 							mtu = ip_rt_min_pmtu;
-							rth->u.dst.metrics[RTAX_LOCK-1] |=
+							rth->dst.metrics[RTAX_LOCK-1] |=
 								(1 << RTAX_MTU);
 						}
-						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
-						dst_set_expires(&rth->u.dst,
+						rth->dst.metrics[RTAX_MTU-1] = mtu;
+						dst_set_expires(&rth->dst,
 								ip_rt_mtu_expires);
 					}
 					est_mtu = mtu;
@@ -1750,7 +1750,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 	if (rt)
-		dst_set_expires(&rt->u.dst, 0);
+		dst_set_expires(&rt->dst, 0);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1778,11 +1778,11 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 
 	if (rt->fl.iif == 0)
 		src = rt->rt_src;
-	else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
+	else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
 		src = FIB_RES_PREFSRC(res);
 		fib_res_put(&res);
 	} else
-		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
+		src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
 					RT_SCOPE_UNIVERSE);
 	memcpy(addr, &src, 4);
 }
@@ -1790,10 +1790,10 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 #ifdef CONFIG_NET_CLS_ROUTE
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
-	if (!(rt->u.dst.tclassid & 0xFFFF))
-		rt->u.dst.tclassid |= tag & 0xFFFF;
-	if (!(rt->u.dst.tclassid & 0xFFFF0000))
-		rt->u.dst.tclassid |= tag & 0xFFFF0000;
+	if (!(rt->dst.tclassid & 0xFFFF))
+		rt->dst.tclassid |= tag & 0xFFFF;
+	if (!(rt->dst.tclassid & 0xFFFF0000))
+		rt->dst.tclassid |= tag & 0xFFFF0000;
 }
 #endif
 
@@ -1805,30 +1805,30 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 		if (FIB_RES_GW(*res) &&
 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = FIB_RES_GW(*res);
-		memcpy(rt->u.dst.metrics, fi->fib_metrics,
-		       sizeof(rt->u.dst.metrics));
+		memcpy(rt->dst.metrics, fi->fib_metrics,
+		       sizeof(rt->dst.metrics));
 		if (fi->fib_mtu == 0) {
-			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-			if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
+			rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+			if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
 			    rt->rt_gateway != rt->rt_dst &&
-			    rt->u.dst.dev->mtu > 576)
-				rt->u.dst.metrics[RTAX_MTU-1] = 576;
+			    rt->dst.dev->mtu > 576)
+				rt->dst.metrics[RTAX_MTU-1] = 576;
 		}
 #ifdef CONFIG_NET_CLS_ROUTE
-		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+		rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
 	} else
-		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
+		rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
 
-	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
-		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
-		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
+	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
+		rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
+	if (dst_mtu(&rt->dst) > IP_MAX_MTU)
+		rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
+		rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
 				       ip_rt_min_advmss);
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
+		rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
 
 #ifdef CONFIG_NET_CLS_ROUTE
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1873,13 +1873,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (!rth)
 		goto e_nobufs;
 
-	rth->u.dst.output = ip_rt_bug;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output = ip_rt_bug;
+	rth->dst.obsolete = -1;
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -1887,13 +1887,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
 	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= init_net.loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= init_net.loopback_dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif	= 0;
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
@@ -1901,13 +1901,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->rt_flags	= RTCF_MULTICAST;
 	rth->rt_type	= RTN_MULTICAST;
 	if (our) {
-		rth->u.dst.input= ip_local_deliver;
+		rth->dst.input= ip_local_deliver;
 		rth->rt_flags |= RTCF_LOCAL;
 	}
 
 #ifdef CONFIG_IP_MROUTE
 	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
-		rth->u.dst.input = ip_mr_input;
+		rth->dst.input = ip_mr_input;
 #endif
 	RT_CACHE_STAT_INC(in_slow_mc);
 
@@ -2016,12 +2016,12 @@ static int __mkroute_input(struct sk_buff *skb,
 		goto cleanup;
 	}
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
-		rth->u.dst.flags |= DST_NOXFRM;
+		rth->dst.flags |= DST_NOXFRM;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -2031,16 +2031,16 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->rt_gateway	= daddr;
 	rth->rt_iif 	=
 		rth->fl.iif	= in_dev->dev->ifindex;
-	rth->u.dst.dev	= (out_dev)->dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= (out_dev)->dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif 	= 0;
 	rth->rt_spec_dst= spec_dst;
 
-	rth->u.dst.obsolete = -1;
-	rth->u.dst.input = ip_forward;
-	rth->u.dst.output = ip_output;
-	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
+	rth->dst.obsolete = -1;
+	rth->dst.input = ip_forward;
+	rth->dst.output = ip_output;
+	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 
 	rt_set_nexthop(rth, res, itag);
 
@@ -2074,7 +2074,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
 
 	/* put it into the cache */
 	hash = rt_hash(daddr, saddr, fl->iif,
-		       rt_genid(dev_net(rth->u.dst.dev)));
+		       rt_genid(dev_net(rth->dst.dev)));
 	return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
 }
 
@@ -2197,14 +2197,14 @@ local_input:
 	if (!rth)
 		goto e_nobufs;
 
-	rth->u.dst.output= ip_rt_bug;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output= ip_rt_bug;
+	rth->dst.obsolete = -1;
 	rth->rt_genid = rt_genid(net);
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -2212,20 +2212,20 @@ local_input:
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
 	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= net->loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= net->loopback_dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
-	rth->u.dst.input= ip_local_deliver;
+	rth->dst.input= ip_local_deliver;
 	rth->rt_flags 	= flags|RTCF_LOCAL;
 	if (res.type == RTN_UNREACHABLE) {
-		rth->u.dst.input= ip_error;
-		rth->u.dst.error= -err;
+		rth->dst.input= ip_error;
+		rth->dst.error= -err;
 		rth->rt_flags 	&= ~RTCF_LOCAL;
 	}
 	rth->rt_type	= res.type;
@@ -2291,21 +2291,21 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
 
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.dst.rt_next)) {
+	     rth = rcu_dereference(rth->dst.rt_next)) {
 		if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
 		     ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
 		     (rth->fl.iif ^ iif) |
 		     rth->fl.oif |
 		     (rth->fl.fl4_tos ^ tos)) == 0 &&
 		    rth->fl.mark == skb->mark &&
-		    net_eq(dev_net(rth->u.dst.dev), net) &&
+		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
 			if (noref) {
-				dst_use_noref(&rth->u.dst, jiffies);
-				skb_dst_set_noref(skb, &rth->u.dst);
+				dst_use_noref(&rth->dst, jiffies);
+				skb_dst_set_noref(skb, &rth->dst);
 			} else {
-				dst_use(&rth->u.dst, jiffies);
-				skb_dst_set(skb, &rth->u.dst);
+				dst_use(&rth->dst, jiffies);
+				skb_dst_set(skb, &rth->dst);
 			}
 			RT_CACHE_STAT_INC(in_hit);
 			rcu_read_unlock();
@@ -2412,12 +2412,12 @@ static int __mkroute_output(struct rtable **result,
 		goto cleanup;
 	}
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
-		rth->u.dst.flags |= DST_NOXFRM;
+		rth->dst.flags |= DST_NOXFRM;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 
 	rth->fl.fl4_dst	= oldflp->fl4_dst;
 	rth->fl.fl4_tos	= tos;
@@ -2429,35 +2429,35 @@ static int __mkroute_output(struct rtable **result,
 	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
 	/* get references to the devices that are to be hold by the routing
 	   cache entry */
-	rth->u.dst.dev	= dev_out;
+	rth->dst.dev	= dev_out;
 	dev_hold(dev_out);
 	rth->idev	= in_dev_get(dev_out);
 	rth->rt_gateway = fl->fl4_dst;
 	rth->rt_spec_dst= fl->fl4_src;
 
-	rth->u.dst.output=ip_output;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output=ip_output;
+	rth->dst.obsolete = -1;
 	rth->rt_genid = rt_genid(dev_net(dev_out));
 
 	RT_CACHE_STAT_INC(out_slow_tot);
 
 	if (flags & RTCF_LOCAL) {
-		rth->u.dst.input = ip_local_deliver;
+		rth->dst.input = ip_local_deliver;
 		rth->rt_spec_dst = fl->fl4_dst;
 	}
 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
 		rth->rt_spec_dst = fl->fl4_src;
 		if (flags & RTCF_LOCAL &&
 		    !(dev_out->flags & IFF_LOOPBACK)) {
-			rth->u.dst.output = ip_mc_output;
+			rth->dst.output = ip_mc_output;
 			RT_CACHE_STAT_INC(out_slow_mc);
 		}
 #ifdef CONFIG_IP_MROUTE
 		if (res->type == RTN_MULTICAST) {
 			if (IN_DEV_MFORWARD(in_dev) &&
 			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
-				rth->u.dst.input = ip_mr_input;
-				rth->u.dst.output = ip_mc_output;
+				rth->dst.input = ip_mr_input;
+				rth->dst.output = ip_mc_output;
 			}
 		}
 #endif
@@ -2712,7 +2712,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
+	     rth = rcu_dereference_bh(rth->dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
@@ -2720,9 +2720,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 		    rth->fl.mark == flp->mark &&
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
 		    (IPTOS_RT_MASK | RTO_ONLINK)) &&
-		    net_eq(dev_net(rth->u.dst.dev), net) &&
+		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
-			dst_use(&rth->u.dst, jiffies);
+			dst_use(&rth->dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
 			rcu_read_unlock_bh();
 			*rp = rth;
@@ -2759,15 +2759,15 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
 		dst_alloc(&ipv4_dst_blackhole_ops);
 
 	if (rt) {
-		struct dst_entry *new = &rt->u.dst;
+		struct dst_entry *new = &rt->dst;
 
 		atomic_set(&new->__refcnt, 1);
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
-		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
+		memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
 
-		new->dev = ort->u.dst.dev;
+		new->dev = ort->dst.dev;
 		if (new->dev)
 			dev_hold(new->dev);
 
@@ -2791,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
 		dst_free(new);
 	}
 
-	dst_release(&(*rp)->u.dst);
+	dst_release(&(*rp)->dst);
 	*rp = rt;
 	return (rt ? 0 : -ENOMEM);
 }
@@ -2861,11 +2861,11 @@ static int rt_fill_info(struct net *net,
 		r->rtm_src_len = 32;
 		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
 	}
-	if (rt->u.dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
+	if (rt->dst.dev)
+		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
 #ifdef CONFIG_NET_CLS_ROUTE
-	if (rt->u.dst.tclassid)
-		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
+	if (rt->dst.tclassid)
+		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
 	if (rt->fl.iif)
 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
@@ -2875,11 +2875,11 @@ static int rt_fill_info(struct net *net,
 	if (rt->rt_dst != rt->rt_gateway)
 		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
 
-	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
 		goto nla_put_failure;
 
-	error = rt->u.dst.error;
-	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
+	error = rt->dst.error;
+	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
 	if (rt->peer) {
 		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
 		if (rt->peer->tcp_ts_stamp) {
@@ -2911,7 +2911,7 @@ static int rt_fill_info(struct net *net,
 			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
 	}
 
-	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
 			       expires, error) < 0)
 		goto nla_put_failure;
 
@@ -2976,8 +2976,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 		local_bh_enable();
 
 		rt = skb_rtable(skb);
-		if (err == 0 && rt->u.dst.error)
-			err = -rt->u.dst.error;
+		if (err == 0 && rt->dst.error)
+			err = -rt->dst.error;
 	} else {
 		struct flowi fl = {
 			.nl_u = {
@@ -2995,7 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 		if (err)
 			goto errout_free;
 
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 		if (rtm->rtm_flags & RTM_F_NOTIFY)
 			rt->rt_flags |= RTCF_NOTIFY;
 
@@ -3031,12 +3031,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			continue;
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
-			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
+		     rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
+			if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
 				continue;
 			if (rt_is_expired(rt))
 				continue;
-			skb_dst_set_noref(skb, &rt->u.dst);
+			skb_dst_set_noref(skb, &rt->dst);
 			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
 					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 					 1, NLM_F_MULTI) <= 0) {