author		Eric Dumazet <eric.dumazet@gmail.com>	2009-08-29 02:52:01 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-29 02:52:01 -0400
commit		30038fc61adfdab162b1966e34261f06eda67f02 (patch)
tree		c9266e5d3c54c7a9a4b17bd68b28b24d629249b2 /net/ipv4
parent		df19a6267705456f463871ae2aabc44299909d2a (diff)
net: ip_rt_send_redirect() optimization
While doing some forwarding benchmarks, I noticed ip_rt_send_redirect() is rather expensive, even if send_redirects is false for the device.

The fix is to avoid two atomic ops: we don't really need to take a reference on in_dev.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/route.c	20
1 file changed, 11 insertions(+), 9 deletions(-)
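The gain comes from the lookup style: in_dev_get()/in_dev_put() bump and drop the in_device refcount (two atomic operations per packet that reaches ip_rt_send_redirect()), whereas __in_dev_get_rcu() only requires holding rcu_read_lock(). Below is a minimal sketch of that pattern, assuming the 2.6.31-era in_device/RCU API; redirects_allowed() is a hypothetical helper for illustration, not code from the patch. Anything needed after the critical section (here the log_martians flag) is copied into a local before rcu_read_unlock(), which is what the patched function does as well.

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* Hypothetical helper, for illustration only: read the per-device
 * redirect sysctls without taking a reference on the in_device.
 * The flag needed after the critical section is copied out before
 * rcu_read_unlock().
 */
static bool redirects_allowed(struct net_device *dev, int *log_martians)
{
	struct in_device *in_dev;
	bool allowed = false;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);	/* no atomic refcount, unlike in_dev_get() */
	if (in_dev && IN_DEV_TX_REDIRECTS(in_dev)) {
		*log_martians = IN_DEV_LOG_MARTIANS(in_dev);
		allowed = true;
	}
	rcu_read_unlock();		/* in_dev must not be used past this point */

	return allowed;
}

Compared with the refcounted variant, this saves the two atomic operations (and the associated cache-line traffic on the refcount) on the forwarding fast path, even when send_redirects is disabled.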
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fafbe163e2b5..91867d3e6328 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1514,13 +1514,17 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 void ip_rt_send_redirect(struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
-	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
+	struct in_device *in_dev;
+	int log_martians;
 
-	if (!in_dev)
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
+		rcu_read_unlock();
 		return;
-
-	if (!IN_DEV_TX_REDIRECTS(in_dev))
-		goto out;
+	}
+	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+	rcu_read_unlock();
 
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
@@ -1533,7 +1537,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	 */
 	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
 		rt->u.dst.rate_last = jiffies;
-		goto out;
+		return;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
@@ -1547,7 +1551,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 		rt->u.dst.rate_last = jiffies;
 		++rt->u.dst.rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-		if (IN_DEV_LOG_MARTIANS(in_dev) &&
+		if (log_martians &&
 		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
 		    net_ratelimit())
 			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
@@ -1555,8 +1559,6 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 				&rt->rt_dst, &rt->rt_gateway);
 #endif
 	}
-out:
-	in_dev_put(in_dev);
 }
 
 static int ip_error(struct sk_buff *skb)