aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2014-09-19 10:38:40 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-23 12:47:38 -0400
commit4cdf507d54525842dfd9f6313fdafba039084046 (patch)
tree3ea6c335251ee0b0bdb404df727ca307d55a9de9 /net/ipv6
parente8b56d55a30afe588d905913d011678235dda437 (diff)
icmp: add a global rate limitation
Current ICMP rate limiting uses inetpeer cache, which is an RB tree
protected by a lock, meaning that hosts can be stuck hard if all cpus
want to check ICMP limits.

When say a DNS or NTP server process is restarted, inetpeer tree grows
quick and machine comes to its knees.

iptables can not help because the bottleneck happens before ICMP
messages are even cooked and sent.

This patch adds a new global limitation, using a token bucket filter,
controlled by two new sysctls:

icmp_msgs_per_sec - INTEGER
    Limit maximal number of ICMP packets sent per second from this host.
    Only messages whose type matches icmp_ratemask are controlled by
    this limit.
    Default: 1000

icmp_msgs_burst - INTEGER
    icmp_msgs_per_sec controls number of ICMP packets sent per second,
    while icmp_msgs_burst controls the burst size of these packets.
    Default: 50

Note that if we really want to send millions of ICMP messages per
second, we might extend the idea and infra added in commit 04ca6973f7c1a
("ip: make IP identifiers less predictable"): add a token bucket in
the ip_idents hash and no longer rely on inetpeer.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/icmp.c20
1 file changed, 12 insertions, 8 deletions
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 394bb824fe4b..141e1f3ab74e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -170,11 +170,11 @@ static bool is_ineligible(const struct sk_buff *skb)
 /*
  * Check the ICMP output rate limit
  */
-static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
-				      struct flowi6 *fl6)
+static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+			       struct flowi6 *fl6)
 {
-	struct dst_entry *dst;
 	struct net *net = sock_net(sk);
+	struct dst_entry *dst;
 	bool res = false;
 
 	/* Informational messages are not limited. */
@@ -199,16 +199,20 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
 	} else {
 		struct rt6_info *rt = (struct rt6_info *)dst;
 		int tmo = net->ipv6.sysctl.icmpv6_time;
-		struct inet_peer *peer;
 
 		/* Give more bandwidth to wider prefixes. */
 		if (rt->rt6i_dst.plen < 128)
 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
 
-		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
-		res = inet_peer_xrlim_allow(peer, tmo);
-		if (peer)
-			inet_putpeer(peer);
+		if (icmp_global_allow()) {
+			struct inet_peer *peer;
+
+			peer = inet_getpeer_v6(net->ipv6.peers,
+					       &rt->rt6i_dst.addr, 1);
+			res = inet_peer_xrlim_allow(peer, tmo);
+			if (peer)
+				inet_putpeer(peer);
+		}
 	}
 	dst_release(dst);
 	return res;