Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--  net/ipv4/route.c  52
1 file changed, 23 insertions(+), 29 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5e676be3daeb..082239ffe34a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -89,6 +89,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/jhash.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
@@ -456,39 +457,19 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-/*
- * Peer allocation may fail only in serious out-of-memory conditions.  However
- * we still can generate some output.
- * Random ID selection looks a bit dangerous because we have no chances to
- * select ID being unique in a reasonable period of time.
- * But broken packet identifier may be better than no packet at all.
- */
-static void ip_select_fb_ident(struct iphdr *iph)
-{
-	static DEFINE_SPINLOCK(ip_fb_id_lock);
-	static u32 ip_fallback_id;
-	u32 salt;
-
-	spin_lock_bh(&ip_fb_id_lock);
-	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
-	iph->id = htons(salt & 0xFFFF);
-	ip_fallback_id = salt;
-	spin_unlock_bh(&ip_fb_id_lock);
-}
+atomic_t *ip_idents __read_mostly;
+EXPORT_SYMBOL(ip_idents);
 
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+void __ip_select_ident(struct iphdr *iph, int segs)
 {
-	struct net *net = dev_net(dst->dev);
-	struct inet_peer *peer;
+	static u32 ip_idents_hashrnd __read_mostly;
+	u32 hash, id;
 
-	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
-	if (peer) {
-		iph->id = htons(inet_getid(peer, more));
-		inet_putpeer(peer);
-		return;
-	}
+	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-	ip_select_fb_ident(iph);
+	hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+	id = ip_idents_reserve(hash, segs);
+	iph->id = htons(id);
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
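The replacement scheme drops the per-destination inet_peer counter (and its spinlocked fallback) in favor of a flat pool of atomic counters: a keyed jhash of the destination address picks a bucket, and ip_idents_reserve() atomically claims `segs` consecutive IDs from it, so a GSO sender can reserve one ID per segment in a single operation. ip_idents_reserve() and IP_IDENTS_SZ are defined outside this file's diff; the userspace C sketch below only illustrates the idea — NUM_BUCKETS, hash_daddr and reserve_ids are illustrative names, and a toy mixer stands in for jhash_1word():

/* Userspace sketch of the ID-selection scheme above; not the kernel's
 * implementation.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BUCKETS 2048u		/* assumed pool size (IP_IDENTS_SZ) */

static atomic_uint id_buckets[NUM_BUCKETS];
static uint32_t hashrnd = 0x9e3779b9;	/* plays the role of ip_idents_hashrnd */

/* Toy 32-bit mixer standing in for jhash_1word(). */
static uint32_t hash_daddr(uint32_t daddr)
{
	uint32_t h = daddr ^ hashrnd;

	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	return h;
}

/* Claim 'segs' consecutive IDs from one bucket; returns the first. */
static uint16_t reserve_ids(uint32_t hash, int segs)
{
	atomic_uint *bucket = &id_buckets[hash % NUM_BUCKETS];

	return (uint16_t)atomic_fetch_add(bucket, (unsigned int)segs);
}

int main(void)
{
	uint32_t daddr = 0x08080808;	/* example destination, 8.8.8.8 */
	uint16_t id = reserve_ids(hash_daddr(daddr), 3);

	/* A 3-segment GSO send would use id, id+1 and id+2. */
	printf("first id: %u\n", id);
	return 0;
}

atomic_fetch_add() returns the value before the addition, so the caller receives the first of `segs` consecutive IDs without taking any lock — the lockless reserve-then-use pattern the new __ip_select_ident() relies on, in contrast to the spinlock serializing the removed fallback path.
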
@@ -993,6 +974,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 	struct flowi4 fl4;
 	struct rtable *rt;
 
+	if (!mark)
+		mark = IP4_REPLY_MARK(net, skb->mark);
+
 	__build_flow_key(&fl4, NULL, iph, oif,
 			 RT_TOS(iph->tos), protocol, mark, flow_flags);
 	rt = __ip_route_output_key(net, &fl4);
@@ -1010,6 +994,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	struct rtable *rt;
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+	if (!fl4.flowi4_mark)
+		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
 	rt = __ip_route_output_key(sock_net(sk), &fl4);
 	if (!IS_ERR(rt)) {
 		__ip_rt_update_pmtu(rt, &fl4, mtu);
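Both PMTU hunks apply the same fallback: when no explicit mark is available, the route lookup for an ICMP-triggered PMTU update uses the reflected firewall mark, so the update lands on the same policy route as the flow that triggered it. IP4_REPLY_MARK() is defined outside this diff; at the time of this change it amounts to a conditional on the per-netns fwmark_reflect sysctl, approximately:

/* Approximate shape of the helper (not part of this diff): yield the
 * triggering packet's mark only when fwmark_reflect is enabled.
 */
#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

With net.ipv4.fwmark_reflect disabled (the default) the expression evaluates to 0 and behavior is unchanged; with it enabled, mark-based routing setups stop losing PMTU information learned via ICMP.
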
@@ -2704,6 +2692,12 @@ int __init ip_rt_init(void)
 {
 	int rc = 0;
 
+	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
+	if (!ip_idents)
+		panic("IP: failed to allocate ip_idents\n");
+
+	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
 	if (!ip_rt_acct)
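ip_rt_init() allocates the counter pool once at boot and pre-fills it with random bytes, so each bucket's ID sequence starts at an unpredictable offset rather than at zero; panic() is appropriate here because IPv4 routing cannot operate without the pool. A minimal userspace analogue of this init step — names are illustrative, getrandom() stands in for prandom_bytes(), and 2048 is an assumed value for IP_IDENTS_SZ, which is defined outside this diff:

/* Userspace sketch of the allocation and seeding above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/random.h>

#define NUM_BUCKETS 2048u	/* stands in for IP_IDENTS_SZ */

static uint32_t *id_buckets;

static void init_id_buckets(void)
{
	size_t len = NUM_BUCKETS * sizeof(*id_buckets);
	uint8_t *p;

	id_buckets = malloc(len);
	if (!id_buckets) {
		fprintf(stderr, "failed to allocate id buckets\n");
		exit(1);	/* the kernel version panics instead */
	}

	/* getrandom() may return fewer bytes than requested for large
	 * buffers; loop until the whole array is seeded.
	 */
	for (p = (uint8_t *)id_buckets; len > 0; ) {
		ssize_t n = getrandom(p, len, 0);

		if (n < 0) {
			perror("getrandom");
			exit(1);
		}
		p += n;
		len -= (size_t)n;
	}
}

int main(void)
{
	init_id_buckets();
	/* Mask to 16 bits, as the IP ID field is. */
	printf("bucket 0 starts at %u\n", id_buckets[0] & 0xFFFF);
	return 0;
}
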