author		David S. Miller <davem@davemloft.net>	2011-02-04 18:55:25 -0500
committer	David S. Miller <davem@davemloft.net>	2011-02-04 18:59:53 -0500
commit		92d8682926342d2b6aa5b2ecc02221e00e1573a0
tree		7f70b9cc2975716ab60ddd632b9fecf0a51b828d	/net/ipv4/route.c
parent		0131ba451e20239c5dc701027c1a2edef95e1a6e
inetpeer: Move ICMP rate limiting state into inet_peer entries.
Like metrics, the ICMP rate limiting bits are cached state about
a destination. So move it into the inet_peer entries.
If an inet_peer cannot be bound (the reason is memory allocation
failure or similar), the policy is to allow.
Signed-off-by: David S. Miller <davem@davemloft.net>
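
The practical point of the new NULL checks in the diff below is a fail-open policy: the rate-limiting counters now live behind rt->peer, which can legitimately be NULL when no inet_peer entry could be allocated, and in that case the ICMP message is sent rather than suppressed. The following is a minimal, self-contained C sketch of the same token-bucket-with-fallback pattern that ip_error() applies to peer->rate_tokens and peer->rate_last; the struct and function names in the sketch are illustrative stand-ins, not the kernel's types or API.

    #include <stdbool.h>

    /* Illustrative stand-in for the per-destination cache entry; the real
     * struct inet_peer carries rate_tokens/rate_last next to the cached
     * metrics. */
    struct peer_state {
            unsigned long rate_tokens;  /* budget accumulated since rate_last */
            unsigned long rate_last;    /* time of the last accounting update */
    };

    /* Same shape as the accounting ip_error() does after this change:
     * refill the budget by the elapsed time, clamp it to a burst ceiling,
     * charge a fixed cost per message, and fall back to "allow" when there
     * is no cached state at all (peer == NULL, e.g. allocation failed). */
    static bool icmp_error_allowed(struct peer_state *peer, unsigned long now,
                                   unsigned long burst, unsigned long cost)
    {
            bool send = true;

            if (peer) {
                    peer->rate_tokens += now - peer->rate_last;
                    if (peer->rate_tokens > burst)
                            peer->rate_tokens = burst;
                    peer->rate_last = now;
                    if (peer->rate_tokens >= cost)
                            peer->rate_tokens -= cost;
                    else
                            send = false;
            }
            return send;
    }

The same trade-off appears in ip_rt_send_redirect(): when no inet_peer can be bound, the redirect is sent immediately, so an allocation failure only loses the rate-limiting history, never the message itself.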
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	56
1 file changed, 38 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0ba6a382b2b4..2e225dafc4f8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1563,6 +1563,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
 	struct in_device *in_dev;
+	struct inet_peer *peer;
 	int log_martians;
 
 	rcu_read_lock();
@@ -1574,33 +1575,41 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 	rcu_read_unlock();
 
+	if (!rt->peer)
+		rt_bind_peer(rt, 1);
+	peer = rt->peer;
+	if (!peer) {
+		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+		return;
+	}
+
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
-		rt->dst.rate_tokens = 0;
+	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+		peer->rate_tokens = 0;
 
 	/* Too many ignored redirects; do not send anything
 	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
-		rt->dst.rate_last = jiffies;
+	if (peer->rate_tokens >= ip_rt_redirect_number) {
+		peer->rate_last = jiffies;
 		return;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (rt->dst.rate_tokens == 0 ||
+	if (peer->rate_tokens == 0 ||
 	    time_after(jiffies,
-		       (rt->dst.rate_last +
-			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
+		       (peer->rate_last +
+			(ip_rt_redirect_load << peer->rate_tokens)))) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-		rt->dst.rate_last = jiffies;
-		++rt->dst.rate_tokens;
+		peer->rate_last = jiffies;
+		++peer->rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    rt->dst.rate_tokens == ip_rt_redirect_number &&
+		    peer->rate_tokens == ip_rt_redirect_number &&
 		    net_ratelimit())
 			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
 			       &rt->rt_src, rt->rt_iif,
@@ -1612,7 +1621,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 static int ip_error(struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
+	struct inet_peer *peer;
 	unsigned long now;
+	bool send;
 	int code;
 
 	switch (rt->dst.error) {
@@ -1632,15 +1643,24 @@ static int ip_error(struct sk_buff *skb)
 		break;
 	}
 
-	now = jiffies;
-	rt->dst.rate_tokens += now - rt->dst.rate_last;
-	if (rt->dst.rate_tokens > ip_rt_error_burst)
-		rt->dst.rate_tokens = ip_rt_error_burst;
-	rt->dst.rate_last = now;
-	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
-		rt->dst.rate_tokens -= ip_rt_error_cost;
-		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+	if (!rt->peer)
+		rt_bind_peer(rt, 1);
+	peer = rt->peer;
+
+	send = true;
+	if (peer) {
+		now = jiffies;
+		peer->rate_tokens += now - peer->rate_last;
+		if (peer->rate_tokens > ip_rt_error_burst)
+			peer->rate_tokens = ip_rt_error_burst;
+		peer->rate_last = now;
+		if (peer->rate_tokens >= ip_rt_error_cost)
+			peer->rate_tokens -= ip_rt_error_cost;
+		else
+			send = false;
 	}
+	if (send)
+		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 
 out:	kfree_skb(skb);
 	return 0;