Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c                 4
-rw-r--r--  net/ipv4/arp.c                    12
-rw-r--r--  net/ipv4/datagram.c                2
-rw-r--r--  net/ipv4/icmp.c                   18
-rw-r--r--  net/ipv4/igmp.c                   10
-rw-r--r--  net/ipv4/inet_connection_sock.c    2
-rw-r--r--  net/ipv4/ip_forward.c             10
-rw-r--r--  net/ipv4/ip_gre.c                 14
-rw-r--r--  net/ipv4/ip_input.c                4
-rw-r--r--  net/ipv4/ip_output.c              60
-rw-r--r--  net/ipv4/ipip.c                    8
-rw-r--r--  net/ipv4/ipmr.c                    8
-rw-r--r--  net/ipv4/netfilter.c               8
-rw-r--r--  net/ipv4/raw.c                    16
-rw-r--r--  net/ipv4/route.c                 420
-rw-r--r--  net/ipv4/syncookies.c              6
-rw-r--r--  net/ipv4/tcp_ipv4.c                2
-rw-r--r--  net/ipv4/udp.c                     4
-rw-r--r--  net/ipv4/xfrm4_policy.c            2
19 files changed, 305 insertions, 305 deletions
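
Every hunk below makes the same mechanical substitution: struct rtable's embedded dst_entry loses its single-member union wrapper, so each rt->u.dst access becomes rt->dst (the matching 305/305 insertion/deletion counts reflect a pure rename). A minimal, self-contained C sketch of the effect, using stand-in types rather than the real definitions (struct rtable lives in include/net/route.h, outside this diffstat):

	#include <stdio.h>

	struct net_device { const char *name; };
	struct dst_entry  { struct net_device *dev; };

	/* before: dst_entry wrapped in a one-member union named 'u' */
	struct rtable_old { union { struct dst_entry dst; } u; };

	/* after: dst_entry embedded directly */
	struct rtable_new { struct dst_entry dst; };

	int main(void)
	{
		struct net_device eth0 = { "eth0" };
		struct rtable_old o = { .u.dst.dev = &eth0 };
		struct rtable_new n = { .dst.dev = &eth0 };

		/* identical layout; the rename just drops the 'u.' path component */
		printf("%s %s\n", o.u.dst.dev->name, n.dst.dev->name);
		return 0;
	}
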
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 551ce564b035..d99e7e020189 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1100,7 +1100,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
 	if (err)
 		return err;
 
-	sk_setup_caps(sk, &rt->u.dst);
+	sk_setup_caps(sk, &rt->dst);
 
 	new_saddr = rt->rt_src;
 
@@ -1166,7 +1166,7 @@ int inet_sk_rebuild_header(struct sock *sk)
 		err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
 	}
 	if (!err)
-		sk_setup_caps(sk, &rt->u.dst);
+		sk_setup_caps(sk, &rt->dst);
 	else {
 		/* Routing failed... */
 		sk->sk_route_caps = 0;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 917d2d66162e..cf78f41830ca 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -427,7 +427,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 
 	if (ip_route_output_key(net, &rt, &fl) < 0)
 		return 1;
-	if (rt->u.dst.dev != dev) {
+	if (rt->dst.dev != dev) {
 		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
@@ -532,7 +532,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
 	struct in_device *out_dev;
 	int imi, omi = -1;
 
-	if (rt->u.dst.dev == dev)
+	if (rt->dst.dev == dev)
 		return 0;
 
 	if (!IN_DEV_PROXY_ARP(in_dev))
@@ -545,7 +545,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
 
 	/* place to check for proxy_arp for routes */
 
-	out_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	out_dev = __in_dev_get_rcu(rt->dst.dev);
 	if (out_dev)
 		omi = IN_DEV_MEDIUM_ID(out_dev);
 
@@ -576,7 +576,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
 				__be32 sip, __be32 tip)
 {
 	/* Private VLAN is only concerned about the same ethernet segment */
-	if (rt->u.dst.dev != dev)
+	if (rt->dst.dev != dev)
 		return 0;
 
 	/* Don't reply on self probes (often done by windowz boxes)*/
@@ -1042,7 +1042,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
 		struct rtable * rt;
 		if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
 			return err;
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 		if (!dev)
 			return -EINVAL;
@@ -1149,7 +1149,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 		struct rtable * rt;
 		if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
 			return err;
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 		if (!dev)
 			return -EINVAL;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index fb2465811b48..fe3daa7f07a9 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -69,7 +69,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	sk->sk_state = TCP_ESTABLISHED;
 	inet->inet_id = jiffies;
 
-	sk_dst_set(sk, &rt->u.dst);
+	sk_dst_set(sk, &rt->dst);
 	return(0);
 }
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index bdb6c71e72a6..7569b21a3a2d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -271,7 +271,7 @@ int xrlim_allow(struct dst_entry *dst, int timeout)
 static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 		int type, int code)
 {
-	struct dst_entry *dst = &rt->u.dst;
+	struct dst_entry *dst = &rt->dst;
 	int rc = 1;
 
 	if (type > NR_ICMP_TYPES)
@@ -327,7 +327,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 	struct sock *sk;
 	struct sk_buff *skb;
 
-	sk = icmp_sk(dev_net((*rt)->u.dst.dev));
+	sk = icmp_sk(dev_net((*rt)->dst.dev));
 	if (ip_append_data(sk, icmp_glue_bits, icmp_param,
 			   icmp_param->data_len+icmp_param->head_len,
 			   icmp_param->head_len,
@@ -359,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 {
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb_rtable(skb);
-	struct net *net = dev_net(rt->u.dst.dev);
+	struct net *net = dev_net(rt->dst.dev);
 	struct sock *sk;
 	struct inet_sock *inet;
 	__be32 daddr;
@@ -427,7 +427,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 
 	if (!rt)
 		goto out;
-	net = dev_net(rt->u.dst.dev);
+	net = dev_net(rt->dst.dev);
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -596,9 +596,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 			/* Ugh! */
 			orefdst = skb_in->_skb_refdst; /* save old refdst */
 			err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
-					     RT_TOS(tos), rt2->u.dst.dev);
+					     RT_TOS(tos), rt2->dst.dev);
 
-			dst_release(&rt2->u.dst);
+			dst_release(&rt2->dst);
 			rt2 = skb_rtable(skb_in);
 			skb_in->_skb_refdst = orefdst; /* restore old refdst */
 		}
@@ -610,7 +610,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 				  XFRM_LOOKUP_ICMP);
 		switch (err) {
 		case 0:
-			dst_release(&rt->u.dst);
+			dst_release(&rt->dst);
 			rt = rt2;
 			break;
 		case -EPERM:
@@ -629,7 +629,7 @@ route_done:
 
 	/* RFC says return as much as we can without exceeding 576 bytes. */
 
-	room = dst_mtu(&rt->u.dst);
+	room = dst_mtu(&rt->dst);
 	if (room > 576)
 		room = 576;
 	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
@@ -972,7 +972,7 @@ int icmp_rcv(struct sk_buff *skb)
 {
 	struct icmphdr *icmph;
 	struct rtable *rt = skb_rtable(skb);
-	struct net *net = dev_net(rt->u.dst.dev);
+	struct net *net = dev_net(rt->dst.dev);
 
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 		struct sec_path *sp = skb_sec_path(skb);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 3294f547c481..b5580d422994 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -312,7 +312,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 		return NULL;
 	}
 
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
 
 	skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -330,7 +330,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	pip->saddr = rt->rt_src;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len = 0;	/* filled in later */
-	ip_select_ident(pip, &rt->u.dst, NULL);
+	ip_select_ident(pip, &rt->dst, NULL);
 	((u8*)&pip[1])[0] = IPOPT_RA;
 	((u8*)&pip[1])[1] = 4;
 	((u8*)&pip[1])[2] = 0;
@@ -660,7 +660,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 		return -1;
 	}
 
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
@@ -676,7 +676,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	iph->daddr = dst;
 	iph->saddr = rt->rt_src;
 	iph->protocol = IPPROTO_IGMP;
-	ip_select_ident(iph, &rt->u.dst, NULL);
+	ip_select_ident(iph, &rt->dst, NULL);
 	((u8*)&iph[1])[0] = IPOPT_RA;
 	((u8*)&iph[1])[1] = 4;
 	((u8*)&iph[1])[2] = 0;
@@ -1425,7 +1425,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 	}
 
 	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 	}
 	if (dev) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 70eb3507c406..57c9e4d7b805 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -383,7 +383,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 		goto no_route;
 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
 		goto route_err;
-	return &rt->u.dst;
+	return &rt->dst;
 
 route_err:
 	ip_rt_put(rt);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 56cdf68a074c..99461f09320f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -87,16 +87,16 @@ int ip_forward(struct sk_buff *skb)
 	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
 		goto sr_failed;
 
-	if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
+	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
 		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
-		IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
+		IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-			  htonl(dst_mtu(&rt->u.dst)));
+			  htonl(dst_mtu(&rt->dst)));
 		goto drop;
 	}
 
 	/* We are about to mangle packet. Copy it! */
-	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
+	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len))
 		goto drop;
 	iph = ip_hdr(skb);
 
@@ -113,7 +113,7 @@ int ip_forward(struct sk_buff *skb)
 	skb->priority = rt_tos2priority(iph->tos);
 
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
-		       rt->u.dst.dev, ip_forward_finish);
+		       rt->dst.dev, ip_forward_finish);
 
 sr_failed:
 	/*
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 32618e11076d..749e54889e82 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -745,7 +745,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			goto tx_error;
 		}
 	}
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
@@ -755,7 +755,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	df = tiph->frag_off;
 	if (df)
-		mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
+		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
 	else
 		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
@@ -803,7 +803,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			tunnel->err_count = 0;
 	}
 
-	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
+	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
 
 	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -830,7 +830,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -853,7 +853,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
 		else
-			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
+			iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
 	}
 
 	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -915,7 +915,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 					.proto = IPPROTO_GRE };
 		struct rtable *rt;
 		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			tdev = rt->u.dst.dev;
+			tdev = rt->dst.dev;
 			ip_rt_put(rt);
 		}
 
@@ -1174,7 +1174,7 @@ static int ipgre_open(struct net_device *dev)
 		struct rtable *rt;
 		if (ip_route_output_key(dev_net(dev), &rt, &fl))
 			return -EADDRNOTAVAIL;
-		dev = rt->u.dst.dev;
+		dev = rt->dst.dev;
 		ip_rt_put(rt);
 		if (__in_dev_get_rtnl(dev) == NULL)
 			return -EADDRNOTAVAIL;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 08a3b121f908..db47a5a00ed2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -356,10 +356,10 @@ static int ip_rcv_finish(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 	if (rt->rt_type == RTN_MULTICAST) {
-		IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST,
+		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
 				   skb->len);
 	} else if (rt->rt_type == RTN_BROADCAST)
-		IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST,
+		IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
 				   skb->len);
 
 	return dst_input(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0d..6cbeb2e108de 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -151,15 +151,15 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 	iph->version = 4;
 	iph->ihl = 5;
 	iph->tos = inet->tos;
-	if (ip_dont_fragment(sk, &rt->u.dst))
+	if (ip_dont_fragment(sk, &rt->dst))
 		iph->frag_off = htons(IP_DF);
 	else
 		iph->frag_off = 0;
-	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
+	iph->ttl = ip_select_ttl(inet, &rt->dst);
 	iph->daddr = rt->rt_dst;
 	iph->saddr = rt->rt_src;
 	iph->protocol = sk->sk_protocol;
-	ip_select_ident(iph, &rt->u.dst, sk);
+	ip_select_ident(iph, &rt->dst, sk);
 
 	if (opt && opt->optlen) {
 		iph->ihl += opt->optlen>>2;
@@ -240,7 +240,7 @@ int ip_mc_output(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct rtable *rt = skb_rtable(skb);
-	struct net_device *dev = rt->u.dst.dev;
+	struct net_device *dev = rt->dst.dev;
 
 	/*
 	 *	If the indicated interface is up and running, send the packet.
@@ -359,9 +359,9 @@ int ip_queue_xmit(struct sk_buff *skb)
 			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
 				goto no_route;
 		}
-		sk_setup_caps(sk, &rt->u.dst);
+		sk_setup_caps(sk, &rt->dst);
 	}
-	skb_dst_set_noref(skb, &rt->u.dst);
+	skb_dst_set_noref(skb, &rt->dst);
 
 packet_routed:
 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -372,11 +372,11 @@ packet_routed:
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-	if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
+	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
 		iph->frag_off = htons(IP_DF);
 	else
 		iph->frag_off = 0;
-	iph->ttl = ip_select_ttl(inet, &rt->u.dst);
+	iph->ttl = ip_select_ttl(inet, &rt->dst);
 	iph->protocol = sk->sk_protocol;
 	iph->saddr = rt->rt_src;
 	iph->daddr = rt->rt_dst;
@@ -387,7 +387,7 @@ packet_routed:
 		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_more(iph, &rt->u.dst, sk,
+	ip_select_ident_more(iph, &rt->dst, sk,
 			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	skb->priority = sk->sk_priority;
@@ -452,7 +452,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	struct rtable *rt = skb_rtable(skb);
 	int err = 0;
 
-	dev = rt->u.dst.dev;
+	dev = rt->dst.dev;
 
 	/*
 	 *	Point into the IP datagram header.
@@ -473,7 +473,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	 */
 
 	hlen = iph->ihl * 4;
-	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
+	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge)
 		mtu -= nf_bridge_mtu_reduction(skb);
@@ -586,7 +586,7 @@ slow_path:
 	 * we need to make room for the encapsulating header
 	 */
 	pad = nf_bridge_pad(skb);
-	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
+	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, pad);
 	mtu -= pad;
 
 	/*
@@ -833,13 +833,13 @@ int ip_append_data(struct sock *sk,
 	 */
 	*rtp = NULL;
 	inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-				    rt->u.dst.dev->mtu :
-				    dst_mtu(rt->u.dst.path);
-	inet->cork.dst = &rt->u.dst;
+				    rt->dst.dev->mtu :
+				    dst_mtu(rt->dst.path);
+	inet->cork.dst = &rt->dst;
 	inet->cork.length = 0;
 	sk->sk_sndmsg_page = NULL;
 	sk->sk_sndmsg_off = 0;
-	if ((exthdrlen = rt->u.dst.header_len) != 0) {
+	if ((exthdrlen = rt->dst.header_len) != 0) {
 		length += exthdrlen;
 		transhdrlen += exthdrlen;
 	}
@@ -852,7 +852,7 @@ int ip_append_data(struct sock *sk,
 		exthdrlen = 0;
 		mtu = inet->cork.fragsize;
 	}
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
@@ -869,14 +869,14 @@ int ip_append_data(struct sock *sk,
 	 */
 	if (transhdrlen &&
 	    length + fragheaderlen <= mtu &&
-	    rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
+	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
 	    !exthdrlen)
 		csummode = CHECKSUM_PARTIAL;
 
 	inet->cork.length += length;
 	if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
 					 fragheaderlen, transhdrlen, mtu,
 					 flags);
@@ -924,7 +924,7 @@ alloc_new_skb:
 			fraglen = datalen + fragheaderlen;
 
 			if ((flags & MSG_MORE) &&
-			    !(rt->u.dst.dev->features&NETIF_F_SG))
+			    !(rt->dst.dev->features&NETIF_F_SG))
 				alloclen = mtu;
 			else
 				alloclen = datalen + fragheaderlen;
@@ -935,7 +935,7 @@ alloc_new_skb:
 			 * the last.
 			 */
 			if (datalen == length + fraggap)
-				alloclen += rt->u.dst.trailer_len;
+				alloclen += rt->dst.trailer_len;
 
 			if (transhdrlen) {
 				skb = sock_alloc_send_skb(sk,
@@ -1008,7 +1008,7 @@ alloc_new_skb:
 		if (copy > length)
 			copy = length;
 
-		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
+		if (!(rt->dst.dev->features&NETIF_F_SG)) {
 			unsigned int off;
 
 			off = skb->len;
@@ -1103,10 +1103,10 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	if (inet->cork.flags & IPCORK_OPT)
 		opt = inet->cork.opt;
 
-	if (!(rt->u.dst.dev->features&NETIF_F_SG))
+	if (!(rt->dst.dev->features&NETIF_F_SG))
 		return -EOPNOTSUPP;
 
-	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 	mtu = inet->cork.fragsize;
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
@@ -1122,7 +1122,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 
 	inet->cork.length += size;
 	if ((sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 	}
@@ -1274,8 +1274,8 @@ int ip_push_pending_frames(struct sock *sk)
 	 * If local_df is set too, we still allow to fragment this frame
 	 * locally. */
 	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
-	    (skb->len <= dst_mtu(&rt->u.dst) &&
-	     ip_dont_fragment(sk, &rt->u.dst)))
+	    (skb->len <= dst_mtu(&rt->dst) &&
+	     ip_dont_fragment(sk, &rt->dst)))
 		df = htons(IP_DF);
 
 	if (inet->cork.flags & IPCORK_OPT)
@@ -1284,7 +1284,7 @@ int ip_push_pending_frames(struct sock *sk)
 	if (rt->rt_type == RTN_MULTICAST)
 		ttl = inet->mc_ttl;
 	else
-		ttl = ip_select_ttl(inet, &rt->u.dst);
+		ttl = ip_select_ttl(inet, &rt->dst);
 
 	iph = (struct iphdr *)skb->data;
 	iph->version = 4;
@@ -1295,7 +1295,7 @@ int ip_push_pending_frames(struct sock *sk)
 	}
 	iph->tos = inet->tos;
 	iph->frag_off = df;
-	ip_select_ident(iph, &rt->u.dst, sk);
+	ip_select_ident(iph, &rt->dst, sk);
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
 	iph->saddr = rt->rt_src;
@@ -1308,7 +1308,7 @@ int ip_push_pending_frames(struct sock *sk)
 	 * on dst refcount
 	 */
 	inet->cork.dst = NULL;
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	if (iph->protocol == IPPROTO_ICMP)
 		icmp_out_count(net, ((struct icmphdr *)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 7fd636711037..ec036731a70b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -435,7 +435,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto tx_error_icmp;
 		}
 	}
-	tdev = rt->u.dst.dev;
+	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
@@ -446,7 +446,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	df |= old_iph->frag_off & htons(IP_DF);
 
 	if (df) {
-		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
 
 		if (mtu < 68) {
 			stats->collisions++;
@@ -503,7 +503,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 
 	/*
 	 *	Push down and install the IPIP header.
@@ -552,7 +552,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
 					.proto = IPPROTO_IPIP };
 		struct rtable *rt;
 		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
-			tdev = rt->u.dst.dev;
+			tdev = rt->dst.dev;
 			ip_rt_put(rt);
 		}
 	dev->flags |= IFF_POINTOPOINT;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 856123fe32f9..8418afc357ee 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1551,9 +1551,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 			goto out_free;
 	}
 
-	dev = rt->u.dst.dev;
+	dev = rt->dst.dev;
 
-	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
+	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
 		/* Do not fragment multicasts. Alas, IPv4 does not
 		   allow to send ICMP, so that packets will disappear
 		   to blackhole.
@@ -1564,7 +1564,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 		goto out_free;
 	}
 
-	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
+	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
 
 	if (skb_cow(skb, encap)) {
 		ip_rt_put(rt);
@@ -1575,7 +1575,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	vif->bytes_out += skb->len;
 
 	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	ip_decrease_ttl(ip_hdr(skb));
 
 	/* FIXME: forward and output firewalls used to be called here.
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 07de855e2175..cfbc79af21c3 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -43,7 +43,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 
 		/* Drop old route. */
 		skb_dst_drop(skb);
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 	} else {
 		/* non-local src, find valid iif to satisfy
 		 * rp-filter when calling ip_route_input. */
@@ -53,11 +53,11 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 
 		orefdst = skb->_skb_refdst;
 		if (ip_route_input(skb, iph->daddr, iph->saddr,
-				   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
-			dst_release(&rt->u.dst);
+				   RT_TOS(iph->tos), rt->dst.dev) != 0) {
+			dst_release(&rt->dst);
 			return -1;
 		}
-		dst_release(&rt->u.dst);
+		dst_release(&rt->dst);
 		refdst_drop(orefdst);
 	}
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 66cc3befcd44..009a7b2aa1ef 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -325,24 +325,24 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 	int err;
 	struct rtable *rt = *rtp;
 
-	if (length > rt->u.dst.dev->mtu) {
+	if (length > rt->dst.dev->mtu) {
 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
-			       rt->u.dst.dev->mtu);
+			       rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
 	if (flags&MSG_PROBE)
 		goto out;
 
 	skb = sock_alloc_send_skb(sk,
-				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
 				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set(skb, &rt->dst);
 	*rtp = NULL;
 
 	skb_reset_network_header(skb);
@@ -375,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 		iph->check = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
-			ip_select_ident(iph, &rt->u.dst, NULL);
+			ip_select_ident(iph, &rt->dst, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
@@ -384,7 +384,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 			      skb_transport_header(skb))->type);
 
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
-		      rt->u.dst.dev, dst_output);
+		      rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
@@ -606,7 +606,7 @@ out:
 	return len;
 
 do_confirm:
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 	if (!(msg->msg_flags & MSG_PROBE) || len)
 		goto back_from_confirm;
 	err = 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 883b5c7195ac..a291edbbc97f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -286,10 +286,10 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 		while (r) {
-			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
+			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
 			    r->rt_genid == st->genid)
 				return r;
-			r = rcu_dereference_bh(r->u.dst.rt_next);
+			r = rcu_dereference_bh(r->dst.rt_next);
 		}
 		rcu_read_unlock_bh();
 	}
@@ -301,7 +301,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 
-	r = r->u.dst.rt_next;
+	r = r->dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		do {
@@ -319,7 +319,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
-		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
+		if (dev_net(r->dst.dev) != seq_file_net(seq))
 			continue;
 		if (r->rt_genid == st->genid)
 			break;
@@ -377,19 +377,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 
 	seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 		   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
-		   r->u.dst.dev ? r->u.dst.dev->name : "*",
+		   r->dst.dev ? r->dst.dev->name : "*",
 		   (__force u32)r->rt_dst,
 		   (__force u32)r->rt_gateway,
-		   r->rt_flags, atomic_read(&r->u.dst.__refcnt),
-		   r->u.dst.__use, 0, (__force u32)r->rt_src,
-		   (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
-		    (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
-		   dst_metric(&r->u.dst, RTAX_WINDOW),
-		   (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
-		    dst_metric(&r->u.dst, RTAX_RTTVAR)),
+		   r->rt_flags, atomic_read(&r->dst.__refcnt),
+		   r->dst.__use, 0, (__force u32)r->rt_src,
+		   (dst_metric(&r->dst, RTAX_ADVMSS) ?
+		    (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+		   dst_metric(&r->dst, RTAX_WINDOW),
+		   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
+		    dst_metric(&r->dst, RTAX_RTTVAR)),
 		   r->fl.fl4_tos,
-		   r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
-		   r->u.dst.hh ? (r->u.dst.hh->hh_output ==
+		   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
+		   r->dst.hh ? (r->dst.hh->hh_output ==
 		    dev_queue_xmit) : 0,
 		   r->rt_spec_dst, &len);
 
@@ -608,13 +608,13 @@ static inline int ip_rt_proc_init(void)
 
 static inline void rt_free(struct rtable *rt)
 {
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline void rt_drop(struct rtable *rt)
 {
 	ip_rt_put(rt);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline int rt_fast_clean(struct rtable *rth)
@@ -622,13 +622,13 @@ static inline int rt_fast_clean(struct rtable *rth)
 	/* Kill broadcast/multicast entries very aggresively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.dst.rt_next;
+		rth->fl.iif && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
 {
 	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		rth->u.dst.expires;
+		rth->dst.expires;
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -636,15 +636,15 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
 	unsigned long age;
 	int ret = 0;
 
-	if (atomic_read(&rth->u.dst.__refcnt))
+	if (atomic_read(&rth->dst.__refcnt))
 		goto out;
 
 	ret = 1;
-	if (rth->u.dst.expires &&
-	    time_after_eq(jiffies, rth->u.dst.expires))
+	if (rth->dst.expires &&
+	    time_after_eq(jiffies, rth->dst.expires))
 		goto out;
 
-	age = jiffies - rth->u.dst.lastuse;
+	age = jiffies - rth->dst.lastuse;
 	ret = 0;
 	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
 	    (age <= tmo2 && rt_valuable(rth)))
@@ -660,7 +660,7 @@ out: return ret;
  */
 static inline u32 rt_score(struct rtable *rt)
 {
-	u32 score = jiffies - rt->u.dst.lastuse;
+	u32 score = jiffies - rt->dst.lastuse;
 
 	score = ~score & ~(3<<30);
 
@@ -700,12 +700,12 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 {
-	return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
+	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
 }
 
 static inline int rt_is_expired(struct rtable *rth)
 {
-	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 }
 
 /*
@@ -734,7 +734,7 @@ static void rt_do_flush(int process_context)
 		rth = rt_hash_table[i].chain;
 
 		/* defer releasing the head of the list after spin_unlock */
-		for (tail = rth; tail; tail = tail->u.dst.rt_next)
+		for (tail = rth; tail; tail = tail->dst.rt_next)
 			if (!rt_is_expired(tail))
 				break;
 		if (rth != tail)
@@ -743,9 +743,9 @@ static void rt_do_flush(int process_context)
 		/* call rt_free on entries after the tail requiring flush */
 		prev = &rt_hash_table[i].chain;
 		for (p = *prev; p; p = next) {
-			next = p->u.dst.rt_next;
+			next = p->dst.rt_next;
 			if (!rt_is_expired(p)) {
-				prev = &p->u.dst.rt_next;
+				prev = &p->dst.rt_next;
 			} else {
 				*prev = next;
 				rt_free(p);
@@ -760,7 +760,7 @@ static void rt_do_flush(int process_context)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth != tail; rth = next) {
-			next = rth->u.dst.rt_next;
+			next = rth->dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -791,7 +791,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	while (aux != rth) {
 		if (compare_hash_inputs(&aux->fl, &rth->fl))
 			return 0;
-		aux = aux->u.dst.rt_next;
+		aux = aux->dst.rt_next;
 	}
 	return ONE;
 }
@@ -831,18 +831,18 @@ static void rt_check_expire(void)
 		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
 		while ((rth = *rthp) != NULL) {
-			prefetch(rth->u.dst.rt_next);
+			prefetch(rth->dst.rt_next);
 			if (rt_is_expired(rth)) {
-				*rthp = rth->u.dst.rt_next;
+				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 				continue;
 			}
-			if (rth->u.dst.expires) {
+			if (rth->dst.expires) {
 				/* Entry is expired even if it is in use */
-				if (time_before_eq(jiffies, rth->u.dst.expires)) {
+				if (time_before_eq(jiffies, rth->dst.expires)) {
 nofree:
 					tmo >>= 1;
-					rthp = &rth->u.dst.rt_next;
+					rthp = &rth->dst.rt_next;
 					/*
 					 * We only count entries on
 					 * a chain with equal hash inputs once
@@ -858,7 +858,7 @@ nofree:
 				goto nofree;
 
 			/* Cleanup aged off entries. */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 		}
 		spin_unlock_bh(rt_hash_lock_addr(i));
@@ -999,10 +999,10 @@ static int rt_garbage_collect(struct dst_ops *ops)
 			if (!rt_is_expired(rth) &&
 			    !rt_may_expire(rth, tmo, expire)) {
 				tmo >>= 1;
-				rthp = &rth->u.dst.rt_next;
+				rthp = &rth->dst.rt_next;
 				continue;
 			}
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 			goal--;
 		}
@@ -1068,7 +1068,7 @@ static int slow_chain_length(const struct rtable *head)
 
 	while (rth) {
 		length += has_noalias(head, rth);
-		rth = rth->u.dst.rt_next;
+		rth = rth->dst.rt_next;
 	}
 	return length >> FRACT_BITS;
 }
@@ -1090,7 +1090,7 @@ restart:
 	candp = NULL;
 	now = jiffies;
 
-	if (!rt_caching(dev_net(rt->u.dst.dev))) {
+	if (!rt_caching(dev_net(rt->dst.dev))) {
 		/*
 		 * If we're not caching, just tell the caller we
 		 * were successful and don't touch the route. The
@@ -1108,7 +1108,7 @@ restart:
 	 */
 
 	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
+		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			if (net_ratelimit())
 				printk(KERN_WARNING
@@ -1127,19 +1127,19 @@ restart:
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	while ((rth = *rthp) != NULL) {
 		if (rt_is_expired(rth)) {
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 			continue;
 		}
 		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
 			/* Put it first */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.dst.rt_next,
+			rcu_assign_pointer(rth->dst.rt_next,
 					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
@@ -1147,18 +1147,18 @@ restart:
 			 */
 			rcu_assign_pointer(rt_hash_table[hash].chain, rth);
 
-			dst_use(&rth->u.dst, now);
+			dst_use(&rth->dst, now);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
 			rt_drop(rt);
 			if (rp)
 				*rp = rth;
 			else
-				skb_dst_set(skb, &rth->u.dst);
+				skb_dst_set(skb, &rth->dst);
 			return 0;
 		}
 
-		if (!atomic_read(&rth->u.dst.__refcnt)) {
+		if (!atomic_read(&rth->dst.__refcnt)) {
 			u32 score = rt_score(rth);
 
 			if (score <= min_score) {
@@ -1170,7 +1170,7 @@ restart:
 
 		chain_length++;
 
-		rthp = &rth->u.dst.rt_next;
+		rthp = &rth->dst.rt_next;
 	}
 
 	if (cand) {
@@ -1181,17 +1181,17 @@ restart:
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.dst.rt_next;
+			*candp = cand->dst.rt_next;
 			rt_free(cand);
 		}
 	} else {
 		if (chain_length > rt_chain_length_max &&
 		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
-			struct net *net = dev_net(rt->u.dst.dev);
+			struct net *net = dev_net(rt->dst.dev);
 			int num = ++net->ipv4.current_rt_cache_rebuild_count;
 			if (!rt_caching(net)) {
 				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
-				       rt->u.dst.dev->name, num);
+				       rt->dst.dev->name, num);
 			}
 			rt_emergency_hash_rebuild(net);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1206,7 +1206,7 @@ restart:
 	   route or unicast forwarding path.
 	 */
 	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
+		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
@@ -1237,14 +1237,14 @@ restart:
 		}
 	}
 
-	rt->u.dst.rt_next = rt_hash_table[hash].chain;
+	rt->dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.dst.rt_next) {
+	if (rt->dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
 		       hash, &rt->rt_dst);
-		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");
 	}
@@ -1262,7 +1262,7 @@ skip_hashing:
 	if (rp)
 		*rp = rt;
 	else
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 	return 0;
 }
 
@@ -1334,11 +1334,11 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	ip_rt_put(rt);
 	while ((aux = *rthp) != NULL) {
 		if (aux == rt || rt_is_expired(aux)) {
-			*rthp = aux->u.dst.rt_next;
+			*rthp = aux->dst.rt_next;
 			rt_free(aux);
 			continue;
 		}
-		rthp = &aux->u.dst.rt_next;
+		rthp = &aux->dst.rt_next;
 	}
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
@@ -1392,19 +1392,19 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
 				    rt_is_expired(rth) ||
-				    !net_eq(dev_net(rth->u.dst.dev), net)) {
-					rthp = &rth->u.dst.rt_next;
+				    !net_eq(dev_net(rth->dst.dev), net)) {
+					rthp = &rth->dst.rt_next;
 					continue;
 				}
 
 				if (rth->rt_dst != daddr ||
 				    rth->rt_src != saddr ||
-				    rth->u.dst.error ||
+				    rth->dst.error ||
 				    rth->rt_gateway != old_gw ||
-				    rth->u.dst.dev != dev)
+				    rth->dst.dev != dev)
 					break;
 
-				dst_hold(&rth->u.dst);
+				dst_hold(&rth->dst);
 
 				rt = dst_alloc(&ipv4_dst_ops);
 				if (rt == NULL) {
@@ -1414,20 +1414,20 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 				/* Copy all the information. */
 				*rt = *rth;
-				rt->u.dst.__use = 1;
-				atomic_set(&rt->u.dst.__refcnt, 1);
-				rt->u.dst.child = NULL;
-				if (rt->u.dst.dev)
-					dev_hold(rt->u.dst.dev);
+				rt->dst.__use = 1;
+				atomic_set(&rt->dst.__refcnt, 1);
+				rt->dst.child = NULL;
+				if (rt->dst.dev)
+					dev_hold(rt->dst.dev);
 				if (rt->idev)
 					in_dev_hold(rt->idev);
-				rt->u.dst.obsolete = -1;
-				rt->u.dst.lastuse = jiffies;
-				rt->u.dst.path = &rt->u.dst;
-				rt->u.dst.neighbour = NULL;
-				rt->u.dst.hh = NULL;
+				rt->dst.obsolete = -1;
+				rt->dst.lastuse = jiffies;
+				rt->dst.path = &rt->dst;
+				rt->dst.neighbour = NULL;
+				rt->dst.hh = NULL;
 #ifdef CONFIG_XFRM
-				rt->u.dst.xfrm = NULL;
+				rt->dst.xfrm = NULL;
 #endif
 				rt->rt_genid = rt_genid(net);
 				rt->rt_flags |= RTCF_REDIRECTED;
@@ -1436,23 +1436,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				rt->rt_gateway = new_gw;
 
 				/* Redirect received -> path was valid */
-				dst_confirm(&rth->u.dst);
+				dst_confirm(&rth->dst);
 
 				if (rt->peer)
 					atomic_inc(&rt->peer->refcnt);
 
-				if (arp_bind_neighbour(&rt->u.dst) ||
-				    !(rt->u.dst.neighbour->nud_state &
+				if (arp_bind_neighbour(&rt->dst) ||
+				    !(rt->dst.neighbour->nud_state &
 				      NUD_VALID)) {
-					if (rt->u.dst.neighbour)
-						neigh_event_send(rt->u.dst.neighbour, NULL);
+					if (rt->dst.neighbour)
+						neigh_event_send(rt->dst.neighbour, NULL);
 					ip_rt_put(rth);
 					rt_drop(rt);
 					goto do_next;
 				}
 
-				netevent.old = &rth->u.dst;
-				netevent.new = &rt->u.dst;
+				netevent.old = &rth->dst;
+				netevent.new = &rt->dst;
 				call_netevent_notifiers(NETEVENT_REDIRECT,
 							&netevent);
 
@@ -1488,8 +1488,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   (rt->u.dst.expires &&
-			    time_after_eq(jiffies, rt->u.dst.expires))) {
+			   (rt->dst.expires &&
+			    time_after_eq(jiffies, rt->dst.expires))) {
 			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
 						rt->fl.oif,
 						rt_genid(dev_net(dst->dev)));
@@ -1527,7 +1527,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1527 int log_martians; 1527 int log_martians;
1528 1528
1529 rcu_read_lock(); 1529 rcu_read_lock();
1530 in_dev = __in_dev_get_rcu(rt->u.dst.dev); 1530 in_dev = __in_dev_get_rcu(rt->dst.dev);
1531 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { 1531 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1532 rcu_read_unlock(); 1532 rcu_read_unlock();
1533 return; 1533 return;
@@ -1538,30 +1538,30 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1538 /* No redirected packets during ip_rt_redirect_silence; 1538 /* No redirected packets during ip_rt_redirect_silence;
1539 * reset the algorithm. 1539 * reset the algorithm.
1540 */ 1540 */
1541 if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence)) 1541 if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
1542 rt->u.dst.rate_tokens = 0; 1542 rt->dst.rate_tokens = 0;
1543 1543
1544 /* Too many ignored redirects; do not send anything 1544 /* Too many ignored redirects; do not send anything
1545 * set u.dst.rate_last to the last seen redirected packet. 1545 * set dst.rate_last to the last seen redirected packet.
1546 */ 1546 */
1547 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) { 1547 if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
1548 rt->u.dst.rate_last = jiffies; 1548 rt->dst.rate_last = jiffies;
1549 return; 1549 return;
1550 } 1550 }
1551 1551
1552 /* Check for load limit; set rate_last to the latest sent 1552 /* Check for load limit; set rate_last to the latest sent
1553 * redirect. 1553 * redirect.
1554 */ 1554 */
1555 if (rt->u.dst.rate_tokens == 0 || 1555 if (rt->dst.rate_tokens == 0 ||
1556 time_after(jiffies, 1556 time_after(jiffies,
1557 (rt->u.dst.rate_last + 1557 (rt->dst.rate_last +
1558 (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) { 1558 (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
1559 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); 1559 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1560 rt->u.dst.rate_last = jiffies; 1560 rt->dst.rate_last = jiffies;
1561 ++rt->u.dst.rate_tokens; 1561 ++rt->dst.rate_tokens;
1562#ifdef CONFIG_IP_ROUTE_VERBOSE 1562#ifdef CONFIG_IP_ROUTE_VERBOSE
1563 if (log_martians && 1563 if (log_martians &&
1564 rt->u.dst.rate_tokens == ip_rt_redirect_number && 1564 rt->dst.rate_tokens == ip_rt_redirect_number &&
1565 net_ratelimit()) 1565 net_ratelimit())
1566 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", 1566 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1567 &rt->rt_src, rt->rt_iif, 1567 &rt->rt_src, rt->rt_iif,
@@ -1576,7 +1576,7 @@ static int ip_error(struct sk_buff *skb)
1576 unsigned long now; 1576 unsigned long now;
1577 int code; 1577 int code;
1578 1578
1579 switch (rt->u.dst.error) { 1579 switch (rt->dst.error) {
1580 case EINVAL: 1580 case EINVAL:
1581 default: 1581 default:
1582 goto out; 1582 goto out;
@@ -1585,7 +1585,7 @@ static int ip_error(struct sk_buff *skb)
1585 break; 1585 break;
1586 case ENETUNREACH: 1586 case ENETUNREACH:
1587 code = ICMP_NET_UNREACH; 1587 code = ICMP_NET_UNREACH;
1588 IP_INC_STATS_BH(dev_net(rt->u.dst.dev), 1588 IP_INC_STATS_BH(dev_net(rt->dst.dev),
1589 IPSTATS_MIB_INNOROUTES); 1589 IPSTATS_MIB_INNOROUTES);
1590 break; 1590 break;
1591 case EACCES: 1591 case EACCES:
@@ -1594,12 +1594,12 @@ static int ip_error(struct sk_buff *skb)
1594 } 1594 }
1595 1595
1596 now = jiffies; 1596 now = jiffies;
1597 rt->u.dst.rate_tokens += now - rt->u.dst.rate_last; 1597 rt->dst.rate_tokens += now - rt->dst.rate_last;
1598 if (rt->u.dst.rate_tokens > ip_rt_error_burst) 1598 if (rt->dst.rate_tokens > ip_rt_error_burst)
1599 rt->u.dst.rate_tokens = ip_rt_error_burst; 1599 rt->dst.rate_tokens = ip_rt_error_burst;
1600 rt->u.dst.rate_last = now; 1600 rt->dst.rate_last = now;
1601 if (rt->u.dst.rate_tokens >= ip_rt_error_cost) { 1601 if (rt->dst.rate_tokens >= ip_rt_error_cost) {
1602 rt->u.dst.rate_tokens -= ip_rt_error_cost; 1602 rt->dst.rate_tokens -= ip_rt_error_cost;
1603 icmp_send(skb, ICMP_DEST_UNREACH, code, 0); 1603 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1604 } 1604 }
1605 1605
@@ -1644,7 +1644,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1644 1644
1645 rcu_read_lock(); 1645 rcu_read_lock();
1646 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 1646 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1647 rth = rcu_dereference(rth->u.dst.rt_next)) { 1647 rth = rcu_dereference(rth->dst.rt_next)) {
1648 unsigned short mtu = new_mtu; 1648 unsigned short mtu = new_mtu;
1649 1649
1650 if (rth->fl.fl4_dst != daddr || 1650 if (rth->fl.fl4_dst != daddr ||
@@ -1653,8 +1653,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1653 rth->rt_src != iph->saddr || 1653 rth->rt_src != iph->saddr ||
1654 rth->fl.oif != ikeys[k] || 1654 rth->fl.oif != ikeys[k] ||
1655 rth->fl.iif != 0 || 1655 rth->fl.iif != 0 ||
1656 dst_metric_locked(&rth->u.dst, RTAX_MTU) || 1656 dst_metric_locked(&rth->dst, RTAX_MTU) ||
1657 !net_eq(dev_net(rth->u.dst.dev), net) || 1657 !net_eq(dev_net(rth->dst.dev), net) ||
1658 rt_is_expired(rth)) 1658 rt_is_expired(rth))
1659 continue; 1659 continue;
1660 1660
@@ -1662,22 +1662,22 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1662 1662
1663 /* BSD 4.2 compatibility hack :-( */ 1663 /* BSD 4.2 compatibility hack :-( */
1664 if (mtu == 0 && 1664 if (mtu == 0 &&
1665 old_mtu >= dst_mtu(&rth->u.dst) && 1665 old_mtu >= dst_mtu(&rth->dst) &&
1666 old_mtu >= 68 + (iph->ihl << 2)) 1666 old_mtu >= 68 + (iph->ihl << 2))
1667 old_mtu -= iph->ihl << 2; 1667 old_mtu -= iph->ihl << 2;
1668 1668
1669 mtu = guess_mtu(old_mtu); 1669 mtu = guess_mtu(old_mtu);
1670 } 1670 }
1671 if (mtu <= dst_mtu(&rth->u.dst)) { 1671 if (mtu <= dst_mtu(&rth->dst)) {
1672 if (mtu < dst_mtu(&rth->u.dst)) { 1672 if (mtu < dst_mtu(&rth->dst)) {
1673 dst_confirm(&rth->u.dst); 1673 dst_confirm(&rth->dst);
1674 if (mtu < ip_rt_min_pmtu) { 1674 if (mtu < ip_rt_min_pmtu) {
1675 mtu = ip_rt_min_pmtu; 1675 mtu = ip_rt_min_pmtu;
1676 rth->u.dst.metrics[RTAX_LOCK-1] |= 1676 rth->dst.metrics[RTAX_LOCK-1] |=
1677 (1 << RTAX_MTU); 1677 (1 << RTAX_MTU);
1678 } 1678 }
1679 rth->u.dst.metrics[RTAX_MTU-1] = mtu; 1679 rth->dst.metrics[RTAX_MTU-1] = mtu;
1680 dst_set_expires(&rth->u.dst, 1680 dst_set_expires(&rth->dst,
1681 ip_rt_mtu_expires); 1681 ip_rt_mtu_expires);
1682 } 1682 }
1683 est_mtu = mtu; 1683 est_mtu = mtu;
@@ -1750,7 +1750,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
1750 1750
1751 rt = skb_rtable(skb); 1751 rt = skb_rtable(skb);
1752 if (rt) 1752 if (rt)
1753 dst_set_expires(&rt->u.dst, 0); 1753 dst_set_expires(&rt->dst, 0);
1754} 1754}
1755 1755
1756static int ip_rt_bug(struct sk_buff *skb) 1756static int ip_rt_bug(struct sk_buff *skb)
@@ -1778,11 +1778,11 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1778 1778
1779 if (rt->fl.iif == 0) 1779 if (rt->fl.iif == 0)
1780 src = rt->rt_src; 1780 src = rt->rt_src;
1781 else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) { 1781 else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
1782 src = FIB_RES_PREFSRC(res); 1782 src = FIB_RES_PREFSRC(res);
1783 fib_res_put(&res); 1783 fib_res_put(&res);
1784 } else 1784 } else
1785 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway, 1785 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1786 RT_SCOPE_UNIVERSE); 1786 RT_SCOPE_UNIVERSE);
1787 memcpy(addr, &src, 4); 1787 memcpy(addr, &src, 4);
1788} 1788}
@@ -1790,10 +1790,10 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1790#ifdef CONFIG_NET_CLS_ROUTE 1790#ifdef CONFIG_NET_CLS_ROUTE
1791static void set_class_tag(struct rtable *rt, u32 tag) 1791static void set_class_tag(struct rtable *rt, u32 tag)
1792{ 1792{
1793 if (!(rt->u.dst.tclassid & 0xFFFF)) 1793 if (!(rt->dst.tclassid & 0xFFFF))
1794 rt->u.dst.tclassid |= tag & 0xFFFF; 1794 rt->dst.tclassid |= tag & 0xFFFF;
1795 if (!(rt->u.dst.tclassid & 0xFFFF0000)) 1795 if (!(rt->dst.tclassid & 0xFFFF0000))
1796 rt->u.dst.tclassid |= tag & 0xFFFF0000; 1796 rt->dst.tclassid |= tag & 0xFFFF0000;
1797} 1797}
1798#endif 1798#endif
1799 1799
@@ -1805,30 +1805,30 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1805 if (FIB_RES_GW(*res) && 1805 if (FIB_RES_GW(*res) &&
1806 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1806 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1807 rt->rt_gateway = FIB_RES_GW(*res); 1807 rt->rt_gateway = FIB_RES_GW(*res);
1808 memcpy(rt->u.dst.metrics, fi->fib_metrics, 1808 memcpy(rt->dst.metrics, fi->fib_metrics,
1809 sizeof(rt->u.dst.metrics)); 1809 sizeof(rt->dst.metrics));
1810 if (fi->fib_mtu == 0) { 1810 if (fi->fib_mtu == 0) {
1811 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; 1811 rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
1812 if (dst_metric_locked(&rt->u.dst, RTAX_MTU) && 1812 if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
1813 rt->rt_gateway != rt->rt_dst && 1813 rt->rt_gateway != rt->rt_dst &&
1814 rt->u.dst.dev->mtu > 576) 1814 rt->dst.dev->mtu > 576)
1815 rt->u.dst.metrics[RTAX_MTU-1] = 576; 1815 rt->dst.metrics[RTAX_MTU-1] = 576;
1816 } 1816 }
1817#ifdef CONFIG_NET_CLS_ROUTE 1817#ifdef CONFIG_NET_CLS_ROUTE
1818 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid; 1818 rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1819#endif 1819#endif
1820 } else 1820 } else
1821 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu; 1821 rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
1822 1822
1823 if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) 1823 if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
1824 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; 1824 rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1825 if (dst_mtu(&rt->u.dst) > IP_MAX_MTU) 1825 if (dst_mtu(&rt->dst) > IP_MAX_MTU)
1826 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; 1826 rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1827 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0) 1827 if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
1828 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40, 1828 rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
1829 ip_rt_min_advmss); 1829 ip_rt_min_advmss);
1830 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40) 1830 if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
1831 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40; 1831 rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1832 1832
1833#ifdef CONFIG_NET_CLS_ROUTE 1833#ifdef CONFIG_NET_CLS_ROUTE
1834#ifdef CONFIG_IP_MULTIPLE_TABLES 1834#ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1873,13 +1873,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1873 if (!rth) 1873 if (!rth)
1874 goto e_nobufs; 1874 goto e_nobufs;
1875 1875
1876 rth->u.dst.output = ip_rt_bug; 1876 rth->dst.output = ip_rt_bug;
1877 rth->u.dst.obsolete = -1; 1877 rth->dst.obsolete = -1;
1878 1878
1879 atomic_set(&rth->u.dst.__refcnt, 1); 1879 atomic_set(&rth->dst.__refcnt, 1);
1880 rth->u.dst.flags= DST_HOST; 1880 rth->dst.flags= DST_HOST;
1881 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 1881 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1882 rth->u.dst.flags |= DST_NOPOLICY; 1882 rth->dst.flags |= DST_NOPOLICY;
1883 rth->fl.fl4_dst = daddr; 1883 rth->fl.fl4_dst = daddr;
1884 rth->rt_dst = daddr; 1884 rth->rt_dst = daddr;
1885 rth->fl.fl4_tos = tos; 1885 rth->fl.fl4_tos = tos;
@@ -1887,13 +1887,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1887 rth->fl.fl4_src = saddr; 1887 rth->fl.fl4_src = saddr;
1888 rth->rt_src = saddr; 1888 rth->rt_src = saddr;
1889#ifdef CONFIG_NET_CLS_ROUTE 1889#ifdef CONFIG_NET_CLS_ROUTE
1890 rth->u.dst.tclassid = itag; 1890 rth->dst.tclassid = itag;
1891#endif 1891#endif
1892 rth->rt_iif = 1892 rth->rt_iif =
1893 rth->fl.iif = dev->ifindex; 1893 rth->fl.iif = dev->ifindex;
1894 rth->u.dst.dev = init_net.loopback_dev; 1894 rth->dst.dev = init_net.loopback_dev;
1895 dev_hold(rth->u.dst.dev); 1895 dev_hold(rth->dst.dev);
1896 rth->idev = in_dev_get(rth->u.dst.dev); 1896 rth->idev = in_dev_get(rth->dst.dev);
1897 rth->fl.oif = 0; 1897 rth->fl.oif = 0;
1898 rth->rt_gateway = daddr; 1898 rth->rt_gateway = daddr;
1899 rth->rt_spec_dst= spec_dst; 1899 rth->rt_spec_dst= spec_dst;
@@ -1901,13 +1901,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1901 rth->rt_flags = RTCF_MULTICAST; 1901 rth->rt_flags = RTCF_MULTICAST;
1902 rth->rt_type = RTN_MULTICAST; 1902 rth->rt_type = RTN_MULTICAST;
1903 if (our) { 1903 if (our) {
1904 rth->u.dst.input= ip_local_deliver; 1904 rth->dst.input= ip_local_deliver;
1905 rth->rt_flags |= RTCF_LOCAL; 1905 rth->rt_flags |= RTCF_LOCAL;
1906 } 1906 }
1907 1907
1908#ifdef CONFIG_IP_MROUTE 1908#ifdef CONFIG_IP_MROUTE
1909 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) 1909 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1910 rth->u.dst.input = ip_mr_input; 1910 rth->dst.input = ip_mr_input;
1911#endif 1911#endif
1912 RT_CACHE_STAT_INC(in_slow_mc); 1912 RT_CACHE_STAT_INC(in_slow_mc);
1913 1913
@@ -2016,12 +2016,12 @@ static int __mkroute_input(struct sk_buff *skb,
2016 goto cleanup; 2016 goto cleanup;
2017 } 2017 }
2018 2018
2019 atomic_set(&rth->u.dst.__refcnt, 1); 2019 atomic_set(&rth->dst.__refcnt, 1);
2020 rth->u.dst.flags= DST_HOST; 2020 rth->dst.flags= DST_HOST;
2021 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 2021 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2022 rth->u.dst.flags |= DST_NOPOLICY; 2022 rth->dst.flags |= DST_NOPOLICY;
2023 if (IN_DEV_CONF_GET(out_dev, NOXFRM)) 2023 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
2024 rth->u.dst.flags |= DST_NOXFRM; 2024 rth->dst.flags |= DST_NOXFRM;
2025 rth->fl.fl4_dst = daddr; 2025 rth->fl.fl4_dst = daddr;
2026 rth->rt_dst = daddr; 2026 rth->rt_dst = daddr;
2027 rth->fl.fl4_tos = tos; 2027 rth->fl.fl4_tos = tos;
@@ -2031,16 +2031,16 @@ static int __mkroute_input(struct sk_buff *skb,
2031 rth->rt_gateway = daddr; 2031 rth->rt_gateway = daddr;
2032 rth->rt_iif = 2032 rth->rt_iif =
2033 rth->fl.iif = in_dev->dev->ifindex; 2033 rth->fl.iif = in_dev->dev->ifindex;
2034 rth->u.dst.dev = (out_dev)->dev; 2034 rth->dst.dev = (out_dev)->dev;
2035 dev_hold(rth->u.dst.dev); 2035 dev_hold(rth->dst.dev);
2036 rth->idev = in_dev_get(rth->u.dst.dev); 2036 rth->idev = in_dev_get(rth->dst.dev);
2037 rth->fl.oif = 0; 2037 rth->fl.oif = 0;
2038 rth->rt_spec_dst= spec_dst; 2038 rth->rt_spec_dst= spec_dst;
2039 2039
2040 rth->u.dst.obsolete = -1; 2040 rth->dst.obsolete = -1;
2041 rth->u.dst.input = ip_forward; 2041 rth->dst.input = ip_forward;
2042 rth->u.dst.output = ip_output; 2042 rth->dst.output = ip_output;
2043 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); 2043 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2044 2044
2045 rt_set_nexthop(rth, res, itag); 2045 rt_set_nexthop(rth, res, itag);
2046 2046
@@ -2074,7 +2074,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
2074 2074
2075 /* put it into the cache */ 2075 /* put it into the cache */
2076 hash = rt_hash(daddr, saddr, fl->iif, 2076 hash = rt_hash(daddr, saddr, fl->iif,
2077 rt_genid(dev_net(rth->u.dst.dev))); 2077 rt_genid(dev_net(rth->dst.dev)));
2078 return rt_intern_hash(hash, rth, NULL, skb, fl->iif); 2078 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
2079} 2079}
2080 2080
@@ -2197,14 +2197,14 @@ local_input:
2197 if (!rth) 2197 if (!rth)
2198 goto e_nobufs; 2198 goto e_nobufs;
2199 2199
2200 rth->u.dst.output= ip_rt_bug; 2200 rth->dst.output= ip_rt_bug;
2201 rth->u.dst.obsolete = -1; 2201 rth->dst.obsolete = -1;
2202 rth->rt_genid = rt_genid(net); 2202 rth->rt_genid = rt_genid(net);
2203 2203
2204 atomic_set(&rth->u.dst.__refcnt, 1); 2204 atomic_set(&rth->dst.__refcnt, 1);
2205 rth->u.dst.flags= DST_HOST; 2205 rth->dst.flags= DST_HOST;
2206 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 2206 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2207 rth->u.dst.flags |= DST_NOPOLICY; 2207 rth->dst.flags |= DST_NOPOLICY;
2208 rth->fl.fl4_dst = daddr; 2208 rth->fl.fl4_dst = daddr;
2209 rth->rt_dst = daddr; 2209 rth->rt_dst = daddr;
2210 rth->fl.fl4_tos = tos; 2210 rth->fl.fl4_tos = tos;
@@ -2212,20 +2212,20 @@ local_input:
2212 rth->fl.fl4_src = saddr; 2212 rth->fl.fl4_src = saddr;
2213 rth->rt_src = saddr; 2213 rth->rt_src = saddr;
2214#ifdef CONFIG_NET_CLS_ROUTE 2214#ifdef CONFIG_NET_CLS_ROUTE
2215 rth->u.dst.tclassid = itag; 2215 rth->dst.tclassid = itag;
2216#endif 2216#endif
2217 rth->rt_iif = 2217 rth->rt_iif =
2218 rth->fl.iif = dev->ifindex; 2218 rth->fl.iif = dev->ifindex;
2219 rth->u.dst.dev = net->loopback_dev; 2219 rth->dst.dev = net->loopback_dev;
2220 dev_hold(rth->u.dst.dev); 2220 dev_hold(rth->dst.dev);
2221 rth->idev = in_dev_get(rth->u.dst.dev); 2221 rth->idev = in_dev_get(rth->dst.dev);
2222 rth->rt_gateway = daddr; 2222 rth->rt_gateway = daddr;
2223 rth->rt_spec_dst= spec_dst; 2223 rth->rt_spec_dst= spec_dst;
2224 rth->u.dst.input= ip_local_deliver; 2224 rth->dst.input= ip_local_deliver;
2225 rth->rt_flags = flags|RTCF_LOCAL; 2225 rth->rt_flags = flags|RTCF_LOCAL;
2226 if (res.type == RTN_UNREACHABLE) { 2226 if (res.type == RTN_UNREACHABLE) {
2227 rth->u.dst.input= ip_error; 2227 rth->dst.input= ip_error;
2228 rth->u.dst.error= -err; 2228 rth->dst.error= -err;
2229 rth->rt_flags &= ~RTCF_LOCAL; 2229 rth->rt_flags &= ~RTCF_LOCAL;
2230 } 2230 }
2231 rth->rt_type = res.type; 2231 rth->rt_type = res.type;
@@ -2291,21 +2291,21 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2291 hash = rt_hash(daddr, saddr, iif, rt_genid(net)); 2291 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2292 2292
2293 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2293 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2294 rth = rcu_dereference(rth->u.dst.rt_next)) { 2294 rth = rcu_dereference(rth->dst.rt_next)) {
2295 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | 2295 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2296 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | 2296 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
2297 (rth->fl.iif ^ iif) | 2297 (rth->fl.iif ^ iif) |
2298 rth->fl.oif | 2298 rth->fl.oif |
2299 (rth->fl.fl4_tos ^ tos)) == 0 && 2299 (rth->fl.fl4_tos ^ tos)) == 0 &&
2300 rth->fl.mark == skb->mark && 2300 rth->fl.mark == skb->mark &&
2301 net_eq(dev_net(rth->u.dst.dev), net) && 2301 net_eq(dev_net(rth->dst.dev), net) &&
2302 !rt_is_expired(rth)) { 2302 !rt_is_expired(rth)) {
2303 if (noref) { 2303 if (noref) {
2304 dst_use_noref(&rth->u.dst, jiffies); 2304 dst_use_noref(&rth->dst, jiffies);
2305 skb_dst_set_noref(skb, &rth->u.dst); 2305 skb_dst_set_noref(skb, &rth->dst);
2306 } else { 2306 } else {
2307 dst_use(&rth->u.dst, jiffies); 2307 dst_use(&rth->dst, jiffies);
2308 skb_dst_set(skb, &rth->u.dst); 2308 skb_dst_set(skb, &rth->dst);
2309 } 2309 }
2310 RT_CACHE_STAT_INC(in_hit); 2310 RT_CACHE_STAT_INC(in_hit);
2311 rcu_read_unlock(); 2311 rcu_read_unlock();
@@ -2412,12 +2412,12 @@ static int __mkroute_output(struct rtable **result,
2412 goto cleanup; 2412 goto cleanup;
2413 } 2413 }
2414 2414
2415 atomic_set(&rth->u.dst.__refcnt, 1); 2415 atomic_set(&rth->dst.__refcnt, 1);
2416 rth->u.dst.flags= DST_HOST; 2416 rth->dst.flags= DST_HOST;
2417 if (IN_DEV_CONF_GET(in_dev, NOXFRM)) 2417 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2418 rth->u.dst.flags |= DST_NOXFRM; 2418 rth->dst.flags |= DST_NOXFRM;
2419 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 2419 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2420 rth->u.dst.flags |= DST_NOPOLICY; 2420 rth->dst.flags |= DST_NOPOLICY;
2421 2421
2422 rth->fl.fl4_dst = oldflp->fl4_dst; 2422 rth->fl.fl4_dst = oldflp->fl4_dst;
2423 rth->fl.fl4_tos = tos; 2423 rth->fl.fl4_tos = tos;
@@ -2429,35 +2429,35 @@ static int __mkroute_output(struct rtable **result,
2429 rth->rt_iif = oldflp->oif ? : dev_out->ifindex; 2429 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
2430 /* get references to the devices that are to be hold by the routing 2430 /* get references to the devices that are to be hold by the routing
2431 cache entry */ 2431 cache entry */
2432 rth->u.dst.dev = dev_out; 2432 rth->dst.dev = dev_out;
2433 dev_hold(dev_out); 2433 dev_hold(dev_out);
2434 rth->idev = in_dev_get(dev_out); 2434 rth->idev = in_dev_get(dev_out);
2435 rth->rt_gateway = fl->fl4_dst; 2435 rth->rt_gateway = fl->fl4_dst;
2436 rth->rt_spec_dst= fl->fl4_src; 2436 rth->rt_spec_dst= fl->fl4_src;
2437 2437
2438 rth->u.dst.output=ip_output; 2438 rth->dst.output=ip_output;
2439 rth->u.dst.obsolete = -1; 2439 rth->dst.obsolete = -1;
2440 rth->rt_genid = rt_genid(dev_net(dev_out)); 2440 rth->rt_genid = rt_genid(dev_net(dev_out));
2441 2441
2442 RT_CACHE_STAT_INC(out_slow_tot); 2442 RT_CACHE_STAT_INC(out_slow_tot);
2443 2443
2444 if (flags & RTCF_LOCAL) { 2444 if (flags & RTCF_LOCAL) {
2445 rth->u.dst.input = ip_local_deliver; 2445 rth->dst.input = ip_local_deliver;
2446 rth->rt_spec_dst = fl->fl4_dst; 2446 rth->rt_spec_dst = fl->fl4_dst;
2447 } 2447 }
2448 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2448 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2449 rth->rt_spec_dst = fl->fl4_src; 2449 rth->rt_spec_dst = fl->fl4_src;
2450 if (flags & RTCF_LOCAL && 2450 if (flags & RTCF_LOCAL &&
2451 !(dev_out->flags & IFF_LOOPBACK)) { 2451 !(dev_out->flags & IFF_LOOPBACK)) {
2452 rth->u.dst.output = ip_mc_output; 2452 rth->dst.output = ip_mc_output;
2453 RT_CACHE_STAT_INC(out_slow_mc); 2453 RT_CACHE_STAT_INC(out_slow_mc);
2454 } 2454 }
2455#ifdef CONFIG_IP_MROUTE 2455#ifdef CONFIG_IP_MROUTE
2456 if (res->type == RTN_MULTICAST) { 2456 if (res->type == RTN_MULTICAST) {
2457 if (IN_DEV_MFORWARD(in_dev) && 2457 if (IN_DEV_MFORWARD(in_dev) &&
2458 !ipv4_is_local_multicast(oldflp->fl4_dst)) { 2458 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2459 rth->u.dst.input = ip_mr_input; 2459 rth->dst.input = ip_mr_input;
2460 rth->u.dst.output = ip_mc_output; 2460 rth->dst.output = ip_mc_output;
2461 } 2461 }
2462 } 2462 }
2463#endif 2463#endif
@@ -2712,7 +2712,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2712 2712
2713 rcu_read_lock_bh(); 2713 rcu_read_lock_bh();
2714 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth; 2714 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2715 rth = rcu_dereference_bh(rth->u.dst.rt_next)) { 2715 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2716 if (rth->fl.fl4_dst == flp->fl4_dst && 2716 if (rth->fl.fl4_dst == flp->fl4_dst &&
2717 rth->fl.fl4_src == flp->fl4_src && 2717 rth->fl.fl4_src == flp->fl4_src &&
2718 rth->fl.iif == 0 && 2718 rth->fl.iif == 0 &&
@@ -2720,9 +2720,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2720 rth->fl.mark == flp->mark && 2720 rth->fl.mark == flp->mark &&
2721 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2721 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2722 (IPTOS_RT_MASK | RTO_ONLINK)) && 2722 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2723 net_eq(dev_net(rth->u.dst.dev), net) && 2723 net_eq(dev_net(rth->dst.dev), net) &&
2724 !rt_is_expired(rth)) { 2724 !rt_is_expired(rth)) {
2725 dst_use(&rth->u.dst, jiffies); 2725 dst_use(&rth->dst, jiffies);
2726 RT_CACHE_STAT_INC(out_hit); 2726 RT_CACHE_STAT_INC(out_hit);
2727 rcu_read_unlock_bh(); 2727 rcu_read_unlock_bh();
2728 *rp = rth; 2728 *rp = rth;
@@ -2759,15 +2759,15 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2759 dst_alloc(&ipv4_dst_blackhole_ops); 2759 dst_alloc(&ipv4_dst_blackhole_ops);
2760 2760
2761 if (rt) { 2761 if (rt) {
2762 struct dst_entry *new = &rt->u.dst; 2762 struct dst_entry *new = &rt->dst;
2763 2763
2764 atomic_set(&new->__refcnt, 1); 2764 atomic_set(&new->__refcnt, 1);
2765 new->__use = 1; 2765 new->__use = 1;
2766 new->input = dst_discard; 2766 new->input = dst_discard;
2767 new->output = dst_discard; 2767 new->output = dst_discard;
2768 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); 2768 memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
2769 2769
2770 new->dev = ort->u.dst.dev; 2770 new->dev = ort->dst.dev;
2771 if (new->dev) 2771 if (new->dev)
2772 dev_hold(new->dev); 2772 dev_hold(new->dev);
2773 2773
@@ -2791,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2791 dst_free(new); 2791 dst_free(new);
2792 } 2792 }
2793 2793
2794 dst_release(&(*rp)->u.dst); 2794 dst_release(&(*rp)->dst);
2795 *rp = rt; 2795 *rp = rt;
2796 return (rt ? 0 : -ENOMEM); 2796 return (rt ? 0 : -ENOMEM);
2797} 2797}
@@ -2861,11 +2861,11 @@ static int rt_fill_info(struct net *net,
2861 r->rtm_src_len = 32; 2861 r->rtm_src_len = 32;
2862 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src); 2862 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2863 } 2863 }
2864 if (rt->u.dst.dev) 2864 if (rt->dst.dev)
2865 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex); 2865 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2866#ifdef CONFIG_NET_CLS_ROUTE 2866#ifdef CONFIG_NET_CLS_ROUTE
2867 if (rt->u.dst.tclassid) 2867 if (rt->dst.tclassid)
2868 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); 2868 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2869#endif 2869#endif
2870 if (rt->fl.iif) 2870 if (rt->fl.iif)
2871 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2871 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
@@ -2875,11 +2875,11 @@ static int rt_fill_info(struct net *net,
2875 if (rt->rt_dst != rt->rt_gateway) 2875 if (rt->rt_dst != rt->rt_gateway)
2876 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); 2876 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2877 2877
2878 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) 2878 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
2879 goto nla_put_failure; 2879 goto nla_put_failure;
2880 2880
2881 error = rt->u.dst.error; 2881 error = rt->dst.error;
2882 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; 2882 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
2883 if (rt->peer) { 2883 if (rt->peer) {
2884 id = atomic_read(&rt->peer->ip_id_count) & 0xffff; 2884 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2885 if (rt->peer->tcp_ts_stamp) { 2885 if (rt->peer->tcp_ts_stamp) {
@@ -2911,7 +2911,7 @@ static int rt_fill_info(struct net *net,
2911 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); 2911 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2912 } 2912 }
2913 2913
2914 if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage, 2914 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2915 expires, error) < 0) 2915 expires, error) < 0)
2916 goto nla_put_failure; 2916 goto nla_put_failure;
2917 2917
@@ -2976,8 +2976,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2976 local_bh_enable(); 2976 local_bh_enable();
2977 2977
2978 rt = skb_rtable(skb); 2978 rt = skb_rtable(skb);
2979 if (err == 0 && rt->u.dst.error) 2979 if (err == 0 && rt->dst.error)
2980 err = -rt->u.dst.error; 2980 err = -rt->dst.error;
2981 } else { 2981 } else {
2982 struct flowi fl = { 2982 struct flowi fl = {
2983 .nl_u = { 2983 .nl_u = {
@@ -2995,7 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2995 if (err) 2995 if (err)
2996 goto errout_free; 2996 goto errout_free;
2997 2997
2998 skb_dst_set(skb, &rt->u.dst); 2998 skb_dst_set(skb, &rt->dst);
2999 if (rtm->rtm_flags & RTM_F_NOTIFY) 2999 if (rtm->rtm_flags & RTM_F_NOTIFY)
3000 rt->rt_flags |= RTCF_NOTIFY; 3000 rt->rt_flags |= RTCF_NOTIFY;
3001 3001
@@ -3031,12 +3031,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3031 continue; 3031 continue;
3032 rcu_read_lock_bh(); 3032 rcu_read_lock_bh();
3033 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt; 3033 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3034 rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) { 3034 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3035 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) 3035 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
3036 continue; 3036 continue;
3037 if (rt_is_expired(rt)) 3037 if (rt_is_expired(rt))
3038 continue; 3038 continue;
3039 skb_dst_set_noref(skb, &rt->u.dst); 3039 skb_dst_set_noref(skb, &rt->dst);
3040 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, 3040 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3041 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 3041 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3042 1, NLM_F_MULTI) <= 0) { 3042 1, NLM_F_MULTI) <= 0) {
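Every hunk in net/ipv4/route.c above is the same mechanical substitution: rt->u.dst becomes rt->dst. The struct change that drives it lives in include/net/route.h and is outside this net/ipv4 diffstat, so the sketch below is reconstructed from the use sites; treat the elided fields as placeholders, not the exact layout:

	/* Old layout: the dst_entry sat inside a single-member union,
	 * which forced the rt->u.dst spelling at every use site.
	 */
	struct rtable {
		union {
			struct dst_entry	dst;
		} u;
		/* ... remaining rtable fields ... */
	};

	/* New layout: the dst_entry is embedded directly and remains the
	 * first member, so the existing casts between struct rtable * and
	 * struct dst_entry * (e.g. in ipv4_negative_advice()) still work
	 * while every caller writes the shorter rt->dst.
	 */
	struct rtable {
		struct dst_entry	dst;
		/* ... remaining rtable fields ... */
	};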
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c48124332de..02bef6aa8b30 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -354,15 +354,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	}

 	/* Try to redo what tcp_v4_send_synack did. */
-	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
+	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);

 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
 				  &req->rcv_wnd, &req->window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
-				  dst_metric(&rt->u.dst, RTAX_INITRWND));
+				  dst_metric(&rt->dst, RTAX_INITRWND));

 	ireq->rcv_wscale = rcv_wscale;

-	ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
+	ret = get_cookie_sock(sk, skb, req, &rt->dst);
 out:	return ret;
 }
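For context on the two dst_metric() reads above: they pull RTAX_WINDOW and RTAX_INITRWND out of the route's metrics array. A sketch of the accessor as it looked in trees of this vintage (quoted from memory of include/net/dst.h, so verify against the tree):

	static inline u32 dst_metric(const struct dst_entry *dst, int metric)
	{
		/* metrics[] is 0-based while RTAX_* starts at 1, hence the -1;
		 * this matches the RTAX_*-1 subscripts in the route.c hunks above.
		 */
		return dst->metrics[metric - 1];
	}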
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7f976af27bf0..7f9515c0379f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -237,7 +237,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)

 	/* OK, now commit destination to socket. */
 	sk->sk_gso_type = SKB_GSO_TCPV4;
-	sk_setup_caps(sk, &rt->u.dst);
+	sk_setup_caps(sk, &rt->dst);

 	if (!tp->write_seq)
 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eec4ff456e33..32e0bef60d0a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -914,7 +914,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		    !sock_flag(sk, SOCK_BROADCAST))
 			goto out;
 		if (connected)
-			sk_dst_set(sk, dst_clone(&rt->u.dst));
+			sk_dst_set(sk, dst_clone(&rt->dst));
 	}

 	if (msg->msg_flags&MSG_CONFIRM)
@@ -978,7 +978,7 @@ out:
 	return err;

 do_confirm:
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 	if (!(msg->msg_flags&MSG_PROBE) || len)
 		goto back_from_confirm;
 	err = 0;
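sk_dst_set() in the first udp.c hunk takes ownership of a reference, which is why the route is wrapped in dst_clone(): the socket keeps one reference while the caller's own reference is still dropped later in udp_sendmsg(). A sketch of the helper as defined in this era (from memory of include/net/dst.h, illustrative only):

	static inline struct dst_entry *dst_clone(struct dst_entry *dst)
	{
		if (dst)
			atomic_inc(&dst->__refcnt);	/* one extra ref for the new holder */
		return dst;
	}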
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..349327092c9e 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -37,7 +37,7 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
 		fl.fl4_src = saddr->a4;

 	err = __ip_route_output_key(net, &rt, &fl);
-	dst = &rt->u.dst;
+	dst = &rt->dst;
 	if (err)
 		dst = ERR_PTR(err);
 	return dst;
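A note on the final hunk: dst is computed from rt unconditionally and only afterwards replaced with ERR_PTR(err) when the lookup failed, so callers must test the result with IS_ERR() rather than comparing against NULL. A hypothetical call site (the surrounding context is illustrative; the real callers sit behind the xfrm dst_ops):

	struct dst_entry *dst = xfrm4_dst_lookup(net, tos, NULL, daddr);
	if (IS_ERR(dst))
		return PTR_ERR(dst);	/* ERR_PTR() packed the errno into the pointer */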