Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--  net/ipv4/route.c | 33 ++++++++++++++++++---------------
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f45f2a12f37b..d0362a2de3d3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,12 +457,9 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 }
 
 #define IP_IDENTS_SZ 2048u
-struct ip_ident_bucket {
-	atomic_t id;
-	u32 stamp32;
-};
 
-static struct ip_ident_bucket *ip_idents __read_mostly;
+static atomic_t *ip_idents __read_mostly;
+static u32 *ip_tstamps __read_mostly;
 
 /* In order to protect privacy, we add a perturbation to identifiers
  * if one generator is seldom used. This makes hard for an attacker
@@ -470,15 +467,16 @@ static struct ip_ident_bucket *ip_idents __read_mostly;
  */
 u32 ip_idents_reserve(u32 hash, int segs)
 {
-	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
-	u32 old = ACCESS_ONCE(bucket->stamp32);
+	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+	u32 old = ACCESS_ONCE(*p_tstamp);
 	u32 now = (u32)jiffies;
 	u32 delta = 0;
 
-	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+	if (old != now && cmpxchg(p_tstamp, old, now) == old)
 		delta = prandom_u32_max(now - old);
 
-	return atomic_add_return(segs + delta, &bucket->id) - segs;
+	return atomic_add_return(segs + delta, p_id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
 
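These two hunks are the heart of the change: the old code packed each ID counter and its timestamp into one struct ip_ident_bucket, while the new code keeps them in two parallel arrays indexed by the same hash; the reservation logic itself is untouched. For readers who want to poke at that logic outside the kernel, here is a minimal user-space model. It is a sketch only: the names, the tick source, and the rand()-based perturbation stand in for the kernel's jiffies and prandom_u32_max().

/* User-space model of the ip_idents_reserve() technique: per-bucket
 * atomic ID counters plus a parallel last-use timestamp array.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define IDENTS_SZ 2048u

static atomic_uint idents[IDENTS_SZ];		/* models ip_idents  */
static _Atomic uint32_t tstamps[IDENTS_SZ];	/* models ip_tstamps */

static uint32_t now_ticks(void)
{
	struct timespec ts;	/* stand-in for the kernel's jiffies */

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint32_t)(ts.tv_sec * 100 + ts.tv_nsec / 10000000);
}

uint32_t idents_reserve(uint32_t hash, int segs)
{
	_Atomic uint32_t *p_tstamp = &tstamps[hash % IDENTS_SZ];
	atomic_uint *p_id = &idents[hash % IDENTS_SZ];
	uint32_t old = atomic_load(p_tstamp);
	uint32_t now = now_ticks();
	uint32_t delta = 0;

	/* If the bucket sat idle, exactly one thread wins the compare-
	 * and-swap and adds a random jump bounded by the idle time, so
	 * an observer cannot count the IDs consumed between two probes.
	 */
	if (old != now &&
	    atomic_compare_exchange_strong(p_tstamp, &old, now))
		delta = (uint32_t)rand() % (now - old);

	/* Reserve segs consecutive IDs and return the first one
	 * (equivalent to atomic_add_return(segs + delta, ...) - segs).
	 */
	return atomic_fetch_add(p_id, segs + delta) + delta;
}

A caller hashing on, say, (daddr, saddr, protocol) takes idents_reserve(hash, nr_segments) as the IP ID of the first segment and uses consecutive values for the rest.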
@@ -749,7 +747,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 		if (!(n->nud_state & NUD_VALID)) {
 			neigh_event_send(n, NULL);
 		} else {
-			if (fib_lookup(net, fl4, &res) == 0) {
+			if (fib_lookup(net, fl4, &res, 0) == 0) {
 				struct fib_nh *nh = &FIB_RES_NH(res);
 
 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
@@ -977,7 +975,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 		return;
 
 	rcu_read_lock();
-	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
+	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
 		struct fib_nh *nh = &FIB_RES_NH(res);
 
 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
@@ -1188,7 +1186,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
 	fl4.flowi4_mark = skb->mark;
 
 	rcu_read_lock();
-	if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
+	if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
 		src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
 	else
 		src = inet_select_addr(rt->dst.dev,
@@ -1718,7 +1716,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
-	err = fib_lookup(net, &fl4, &res);
+	err = fib_lookup(net, &fl4, &res, 0);
 	if (err != 0) {
 		if (!IN_DEV_FORWARD(in_dev))
 			err = -EHOSTUNREACH;
@@ -2097,7 +2095,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 			goto out;
 		}
 		if (ipv4_is_local_multicast(fl4->daddr) ||
-		    ipv4_is_lbcast(fl4->daddr)) {
+		    ipv4_is_lbcast(fl4->daddr) ||
+		    fl4->flowi4_proto == IPPROTO_IGMP) {
 			if (!fl4->saddr)
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      RT_SCOPE_LINK);
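The new IPPROTO_IGMP test lets IGMP share the early path that link-local multicast and broadcast already take: when the caller pins the output device but leaves the source address empty, a link-scope address is selected with inet_select_addr() instead of falling through to the full FIB lookup. A user-space sender that would exercise this path looks roughly like the sketch below; raw sockets need CAP_NET_RAW, and "eth0" is only an illustrative interface name.

/* Raw IGMP socket pinned to one device, with no source address set;
 * route lookups for its packets carry flowi4_proto == IPPROTO_IGMP
 * and a nonzero flowi4_oif, the case the hunk above handles.
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

int open_igmp_socket(const char *ifname)	/* e.g. "eth0" */
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

	if (fd < 0)
		return -1;
	/* Pin the output device; the kernel must now choose the
	 * source address on its own.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
		       strlen(ifname) + 1) < 0)
		return -1;	/* real code would also close(fd) */
	return fd;
}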
@@ -2124,7 +2123,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 		goto make_route;
 	}
 
-	if (fib_lookup(net, fl4, &res)) {
+	if (fib_lookup(net, fl4, &res, 0)) {
 		res.fi = NULL;
 		res.table = NULL;
 		if (fl4->flowi4_oif) {
@@ -2742,6 +2741,10 @@ int __init ip_rt_init(void)
 
 	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
 
+	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+	if (!ip_tstamps)
+		panic("IP: failed to allocate ip_tstamps\n");
+
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
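The init hunk completes the split: ip_idents keeps its random starting values from prandom_bytes(), while the new ip_tstamps array starts zeroed, which kcalloc() guarantees. In the user-space model above, the same setup might look like this (rand() again stands in for the kernel's PRNG):

/* Init step for the model above: random counters, zeroed timestamps. */
void idents_init(unsigned int seed)
{
	unsigned int i;

	srand(seed);
	for (i = 0; i < IDENTS_SZ; i++)
		atomic_store(&idents[i], (unsigned int)rand());
	/* tstamps[] needs no touch: static storage here, kcalloc() in
	 * the kernel, so every bucket starts with a zero timestamp and
	 * its first reservation sees a full-sized idle window.
	 */
}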