Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c | 99
1 file changed, 51 insertions, 48 deletions
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 20ffe8e88c0f..c41ddba02e9d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -261,6 +261,10 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr)
 		& rt_hash_mask);
 }
 
+#define rt_hash(daddr, saddr, idx) \
+	rt_hash_code((__force u32)(__be32)(daddr),\
+		(__force u32)(__be32)(saddr) ^ ((idx) << 5))
+
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
 	int bucket;
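
The rt_hash() macro introduced above centralises the open-coded rt_hash_code(daddr, saddr ^ (ifindex << 5)) pattern used by the callers changed later in this diff. The (__force u32) casts strip the __be32 endianness annotation so sparse does not warn about host-order arithmetic on network-order addresses; the computed hash value itself is unchanged. A minimal caller-side sketch (the helper name and variables are illustrative, not part of the patch):

    /* illustration only: how a typical call site reads after the change */
    static unsigned example_hash(__be32 daddr, __be32 saddr, int ifindex)
    {
            /* previously: rt_hash_code(daddr, saddr ^ (ifindex << 5)),
             * which mixed annotated __be32 values with host-order math */
            return rt_hash(daddr, saddr, ifindex);
    }
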
@@ -1074,7 +1078,7 @@ static void ip_select_fb_ident(struct iphdr *iph)
 	u32 salt;
 
 	spin_lock_bh(&ip_fb_id_lock);
-	salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
+	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
 	iph->id = htons(salt & 0xFFFF);
 	ip_fallback_id = salt;
 	spin_unlock_bh(&ip_fb_id_lock);
@@ -1118,13 +1122,13 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
-		    u32 saddr, struct net_device *dev)
+void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
+		    __be32 saddr, struct net_device *dev)
 {
 	int i, k;
 	struct in_device *in_dev = in_dev_get(dev);
 	struct rtable *rth, **rthp;
-	u32 skeys[2] = { saddr, 0 };
+	__be32 skeys[2] = { saddr, 0 };
 	int ikeys[2] = { dev->ifindex, 0 };
 	struct netevent_redirect netevent;
 
@@ -1147,8 +1151,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
 
 	for (i = 0; i < 2; i++) {
 		for (k = 0; k < 2; k++) {
-			unsigned hash = rt_hash_code(daddr,
-						     skeys[i] ^ (ikeys[k] << 5));
+			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
 
 			rthp=&rt_hash_table[hash].chain;
 
@@ -1260,9 +1263,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 			ret = NULL;
 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
 			   rt->u.dst.expires) {
-			unsigned hash = rt_hash_code(rt->fl.fl4_dst,
-						     rt->fl.fl4_src ^
-							(rt->fl.oif << 5));
+			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+						rt->fl.oif);
 #if RT_CACHE_DEBUG >= 1
 			printk(KERN_DEBUG "ip_rt_advice: redirect to "
 				  "%u.%u.%u.%u/%02x dropped\n",
@@ -1397,15 +1399,15 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 	int i;
 	unsigned short old_mtu = ntohs(iph->tot_len);
 	struct rtable *rth;
-	u32 skeys[2] = { iph->saddr, 0, };
-	u32 daddr = iph->daddr;
+	__be32 skeys[2] = { iph->saddr, 0, };
+	__be32 daddr = iph->daddr;
 	unsigned short est_mtu = 0;
 
 	if (ipv4_config.no_pmtu_disc)
 		return 0;
 
 	for (i = 0; i < 2; i++) {
-		unsigned hash = rt_hash_code(daddr, skeys[i]);
+		unsigned hash = rt_hash(daddr, skeys[i], 0);
 
 		rcu_read_lock();
 		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1530,7 +1532,7 @@ static int ip_rt_bug(struct sk_buff *skb)
 
 void ip_rt_get_source(u8 *addr, struct rtable *rt)
 {
-	u32 src;
+	__be32 src;
 	struct fib_result res;
 
 	if (rt->fl.iif == 0)
@@ -1596,12 +1598,12 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 	rt->rt_type = res->type;
 }
 
-static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
+static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 				u8 tos, struct net_device *dev, int our)
 {
 	unsigned hash;
 	struct rtable *rth;
-	u32 spec_dst;
+	__be32 spec_dst;
 	struct in_device *in_dev = in_dev_get(dev);
 	u32 itag = 0;
 
@@ -1665,7 +1667,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
 	RT_CACHE_STAT_INC(in_slow_mc);
 
 	in_dev_put(in_dev);
-	hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
+	hash = rt_hash(daddr, saddr, dev->ifindex);
 	return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
 
 e_nobufs:
@@ -1681,8 +1683,8 @@ e_inval:
 static void ip_handle_martian_source(struct net_device *dev,
 				     struct in_device *in_dev,
 				     struct sk_buff *skb,
-				     u32 daddr,
-				     u32 saddr)
+				     __be32 daddr,
+				     __be32 saddr)
 {
 	RT_CACHE_STAT_INC(in_martian_src);
 #ifdef CONFIG_IP_ROUTE_VERBOSE
@@ -1712,7 +1714,7 @@ static void ip_handle_martian_source(struct net_device *dev,
 static inline int __mkroute_input(struct sk_buff *skb,
 				  struct fib_result* res,
 				  struct in_device *in_dev,
-				  u32 daddr, u32 saddr, u32 tos,
+				  __be32 daddr, __be32 saddr, u32 tos,
 				  struct rtable **result)
 {
 
@@ -1720,7 +1722,8 @@ static inline int __mkroute_input(struct sk_buff *skb,
 	int err;
 	struct in_device *out_dev;
 	unsigned flags = 0;
-	u32 spec_dst, itag;
+	__be32 spec_dst;
+	u32 itag;
 
 	/* get a working reference to the output device */
 	out_dev = in_dev_get(FIB_RES_DEV(*res));
@@ -1813,7 +1816,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
 					struct fib_result* res,
 					const struct flowi *fl,
 					struct in_device *in_dev,
-					u32 daddr, u32 saddr, u32 tos)
+					__be32 daddr, __be32 saddr, u32 tos)
 {
 	struct rtable* rth = NULL;
 	int err;
@@ -1830,7 +1833,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
 		return err;
 
 	/* put it into the cache */
-	hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
+	hash = rt_hash(daddr, saddr, fl->iif);
 	return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
 }
 
@@ -1838,7 +1841,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
 				   struct fib_result* res,
 				   const struct flowi *fl,
 				   struct in_device *in_dev,
-				   u32 daddr, u32 saddr, u32 tos)
+				   __be32 daddr, __be32 saddr, u32 tos)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
 	struct rtable* rth = NULL, *rtres;
@@ -1871,7 +1874,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
 		return err;
 
 	/* put it into the cache */
-	hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
+	hash = rt_hash(daddr, saddr, fl->iif);
 	err = rt_intern_hash(hash, rth, &rtres);
 	if (err)
 		return err;
@@ -1901,7 +1904,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
  *	2. IP spoofing attempts are filtered with 100% of guarantee.
  */
 
-static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
+static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			       u8 tos, struct net_device *dev)
 {
 	struct fib_result res;
@@ -1920,7 +1923,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
 	u32 itag = 0;
 	struct rtable * rth;
 	unsigned hash;
-	u32 spec_dst;
+	__be32 spec_dst;
 	int err = -EINVAL;
 	int free_res = 0;
 
@@ -1936,7 +1939,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
 	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
 		goto martian_source;
 
-	if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0))
+	if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
 		goto brd_input;
 
 	/* Accept zero addresses only to limited broadcast;
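
With daddr annotated as __be32, comparing it against a bare 0xFFFFFFFF would mix byte orders as far as sparse is concerned, which is why the limited-broadcast checks here and in the output path gain htonl(). Numerically nothing changes, since all-ones is the same bit pattern on either endianness; only the type of the constant does. A small sketch under that assumption (is_limited_broadcast() is a hypothetical helper, not in the patch):

    /* htonl(0xFFFFFFFF) is still all-ones; it only gives the constant
     * the __be32 type that sparse expects on the right-hand side */
    static int is_limited_broadcast(__be32 daddr)
    {
            return daddr == htonl(0xFFFFFFFF);
    }
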
@@ -2048,7 +2051,7 @@ local_input:
 		rth->rt_flags &= ~RTCF_LOCAL;
 	}
 	rth->rt_type = res.type;
-	hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
+	hash = rt_hash(daddr, saddr, fl.iif);
 	err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
 	goto done;
 
@@ -2087,7 +2090,7 @@ martian_source:
 	goto e_inval;
 }
 
-int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
+int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		   u8 tos, struct net_device *dev)
 {
 	struct rtable * rth;
@@ -2095,7 +2098,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
 	int iif = dev->ifindex;
 
 	tos &= IPTOS_RT_MASK;
-	hash = rt_hash_code(daddr, saddr ^ (iif << 5));
+	hash = rt_hash(daddr, saddr, iif);
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2169,7 +2172,7 @@ static inline int __mkroute_output(struct rtable **result,
 	if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
 		return -EINVAL;
 
-	if (fl->fl4_dst == 0xFFFFFFFF)
+	if (fl->fl4_dst == htonl(0xFFFFFFFF))
 		res->type = RTN_BROADCAST;
 	else if (MULTICAST(fl->fl4_dst))
 		res->type = RTN_MULTICAST;
@@ -2293,8 +2296,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
 	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
 	unsigned hash;
 	if (err == 0) {
-		hash = rt_hash_code(oldflp->fl4_dst,
-				    oldflp->fl4_src ^ (oldflp->oif << 5));
+		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
 		err = rt_intern_hash(hash, rth, rp);
 	}
 
@@ -2336,9 +2338,8 @@ static inline int ip_mkroute_output(struct rtable** rp,
 	if (err != 0)
 		goto cleanup;
 
-	hash = rt_hash_code(oldflp->fl4_dst,
-			    oldflp->fl4_src ^
-			    (oldflp->oif << 5));
+	hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src,
+		       oldflp->oif);
 	err = rt_intern_hash(hash, rth, rp);
 
 	/* forward hop information to multipath impl. */
@@ -2417,7 +2418,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
 	 */
 
 	if (oldflp->oif == 0
-	    && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) {
+	    && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
 		/* Special hack: user can direct multicasts
 		   and limited broadcast via necessary interface
 		   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
@@ -2454,7 +2455,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
 		goto out;	/* Wrong error code */
 	}
 
-	if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) {
+	if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
 		if (!fl.fl4_src)
 			fl.fl4_src = inet_select_addr(dev_out, 0,
 						      RT_SCOPE_LINK);
@@ -2567,7 +2568,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
 	unsigned hash;
 	struct rtable *rth;
 
-	hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
+	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2660,11 +2661,11 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
 
-	NLA_PUT_U32(skb, RTA_DST, rt->rt_dst);
+	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
 
 	if (rt->fl.fl4_src) {
 		r->rtm_src_len = 32;
-		NLA_PUT_U32(skb, RTA_SRC, rt->fl.fl4_src);
+		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
 	}
 	if (rt->u.dst.dev)
 		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
@@ -2677,12 +2678,12 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 		NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg);
 #endif
 	if (rt->fl.iif)
-		NLA_PUT_U32(skb, RTA_PREFSRC, rt->rt_spec_dst);
+		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
 	else if (rt->rt_src != rt->fl.fl4_src)
-		NLA_PUT_U32(skb, RTA_PREFSRC, rt->rt_src);
+		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
 
 	if (rt->rt_dst != rt->rt_gateway)
-		NLA_PUT_U32(skb, RTA_GATEWAY, rt->rt_gateway);
+		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
 
 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
 		goto nla_put_failure;
@@ -2706,7 +2707,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 
 	if (rt->fl.iif) {
 #ifdef CONFIG_IP_MROUTE
-		u32 dst = rt->rt_dst;
+		__be32 dst = rt->rt_dst;
 
 		if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
 		    ipv4_devconf.mc_forwarding) {
@@ -2740,7 +2741,9 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
 	struct rtmsg *rtm;
 	struct nlattr *tb[RTA_MAX+1];
 	struct rtable *rt = NULL;
-	u32 dst, src, iif;
+	__be32 dst = 0;
+	__be32 src = 0;
+	u32 iif;
 	int err;
 	struct sk_buff *skb;
 
@@ -2765,8 +2768,8 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
 	skb->nh.iph->protocol = IPPROTO_ICMP;
 	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
-	src = tb[RTA_SRC] ? nla_get_u32(tb[RTA_SRC]) : 0;
-	dst = tb[RTA_DST] ? nla_get_u32(tb[RTA_DST]) : 0;
+	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
+	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
 
 	if (iif) {
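
The RTA_SRC/RTA_DST payloads are IPv4 addresses in network byte order, so rt_fill_info() and inet_rtm_getroute() above switch to the BE32 netlink helpers; the bytes on the wire are the same, and only the annotated type handled in C changes. A rough sketch of the accessor side, assuming the be32 helpers mirror their u32 counterparts (simplified, not copied from include/net/netlink.h):

    /* same memory access as nla_get_u32(), but the result keeps its
     * big-endian annotation for sparse */
    static inline __be32 nla_get_be32(const struct nlattr *nla)
    {
            return *(__be32 *) nla_data(nla);
    }
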