path: root/net
author	Eric Dumazet <eric.dumazet@gmail.com>	2011-11-26 07:13:44 -0500
committer	David S. Miller <davem@davemloft.net>	2011-11-26 19:16:37 -0500
commit	de68dca1816660b0d3ac89fa59ffb410007a143f (patch)
tree	9a31e87c6e0504627df25d3a1d39ca7825dcd779 /net
parent	0884d7aa24e15e72b3c07f7da910a13bb7df3592 (diff)
inet: add a redirect generation id in inetpeer
Now that inetpeer is the place where we cache redirect information for ipv4 destinations, we must be able to invalidate that information when a route is added/removed on the host.

As inetpeer is not yet namespace aware, this patch adds a shared redirect_genid and a per-inetpeer redirect_genid. This might be changed later if inetpeer becomes namespace aware.

Cached information for one inetpeer is valid as long as its redirect_genid has the same value as the global redirect_genid.

Reported-by: Arkadiusz Miśkiewicz <a.miskiewicz@gmail.com>
Tested-by: Arkadiusz Miśkiewicz <a.miskiewicz@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
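For readers unfamiliar with the pattern, here is a minimal user-space sketch of the same generation-counter invalidation idea. The struct and function names (peer_cache, cache_store, cache_lookup, invalidate_all) are illustrative only and are not the kernel's actual API; they just show how bumping one global counter lazily invalidates every per-peer entry.

	#include <stdio.h>

	/* Global generation counter: bumped whenever routing state changes,
	 * which implicitly invalidates every per-peer cached redirect. */
	static int redirect_genid;

	/* Hypothetical per-peer cache entry, loosely mirroring the patch:
	 * the cached gateway is trusted only while the stored generation
	 * matches the global one. */
	struct peer_cache {
		unsigned int learned_gw;   /* cached redirect target (0 = none) */
		int redirect_genid;        /* generation at the time of caching */
	};

	/* Record a redirect for this peer under the current generation. */
	static void cache_store(struct peer_cache *p, unsigned int gw)
	{
		if (p->learned_gw != gw || p->redirect_genid != redirect_genid) {
			p->learned_gw = gw;
			p->redirect_genid = redirect_genid;
		}
	}

	/* Return the cached gateway, or 0 if the entry is stale. */
	static unsigned int cache_lookup(struct peer_cache *p)
	{
		if (p->redirect_genid != redirect_genid)
			p->learned_gw = 0;   /* stale: drop the cached redirect */
		return p->learned_gw;
	}

	/* Invalidate every peer's cache in O(1) by bumping the generation. */
	static void invalidate_all(void)
	{
		redirect_genid++;
	}

	int main(void)
	{
		struct peer_cache peer = { 0, 0 };

		cache_store(&peer, 0x0a000001);   /* cache 10.0.0.1 */
		printf("before: %#x\n", cache_lookup(&peer));

		invalidate_all();                 /* e.g. a route was added */
		printf("after:  %#x\n", cache_lookup(&peer));
		return 0;
	}

The advantage over walking every peer at invalidation time is that a route change costs a single increment; stale entries are detected and cleared lazily on their next lookup, which is exactly what the hunks below do in ipv4_dst_check() and rt_init_metrics().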
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/route.c	10
1 file changed, 9 insertions, 1 deletion
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fb47c8f0cd8..5c2847247f5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
 static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;
 
 /*
  * Interface to generic destination cache.
@@ -837,6 +838,7 @@ static void rt_cache_invalidate(struct net *net)
 
 	get_random_bytes(&shuffle, sizeof(shuffle));
 	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+	redirect_genid++;
 }
 
 /*
@@ -1391,8 +1393,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 	peer = rt->peer;
 	if (peer) {
-		if (peer->redirect_learned.a4 != new_gw) {
+		if (peer->redirect_learned.a4 != new_gw ||
+		    peer->redirect_genid != redirect_genid) {
 			peer->redirect_learned.a4 = new_gw;
+			peer->redirect_genid = redirect_genid;
 			atomic_inc(&__rt_peer_genid);
 		}
 		check_peer_redir(&rt->dst, peer);
@@ -1701,6 +1705,8 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 	if (peer) {
 		check_peer_pmtu(dst, peer);
 
+		if (peer->redirect_genid != redirect_genid)
+			peer->redirect_learned.a4 = 0;
 		if (peer->redirect_learned.a4 &&
 		    peer->redirect_learned.a4 != rt->rt_gateway) {
 			if (check_peer_redir(dst, peer))
@@ -1857,6 +1863,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
 	dst_init_metrics(&rt->dst, peer->metrics, false);
 
 	check_peer_pmtu(&rt->dst, peer);
+	if (peer->redirect_genid != redirect_genid)
+		peer->redirect_learned.a4 = 0;
 	if (peer->redirect_learned.a4 &&
 	    peer->redirect_learned.a4 != rt->rt_gateway) {
 		rt->rt_gateway = peer->redirect_learned.a4;