author    Eric Dumazet <eric.dumazet@gmail.com>   2010-04-27 18:13:20 -0400
committer David S. Miller <davem@davemloft.net>   2010-04-27 18:13:20 -0400
commit    c377411f2494a931ff7facdbb3a6839b1266bcf6
tree      6846cdcec913f50839e3916856f78f7e059ff5fb /include/net
parent    6e7676c1a76aed6e957611d8d7a9e5592e23aeba
net: sk_add_backlog() take rmem_alloc into account
The current socket backlog limit is not enough to really stop DDoS attacks,
because the user thread spends a lot of time processing a full backlog on
each round, and can end up spinning madly on the socket lock.

We should account the backlog size and the receive queue size (aka
rmem_alloc) together to pace writers, and let the user thread run without
being slowed down too much.

Introduce a sk_rcvqueues_full() helper to avoid taking the socket lock in
stress situations.
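As a rough sketch (not part of this patch, and with a purely illustrative
handler name), a protocol receive path can test the helper before
bh_lock_sock() and shed the packet without ever contending on the lock:

#include <net/sock.h>

/* Hypothetical early check in a receive handler: if the backlog plus the
 * receive queue already exceed sk_rcvbuf, drop the skb before taking the
 * socket lock at all. */
static int example_early_drop(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}
	return 0;	/* caller goes on to bh_lock_sock() and delivery */
}

A fuller caller sketch follows the diff below.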
Under huge stress from a multiqueue/RPS-enabled NIC, a single-flow UDP
receiver can now process ~200,000 pps (instead of ~100 pps before the
patch) on an 8-core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net')
 include/net/sock.h | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 07822280d953..cf12b1e61fa6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -256,7 +256,6 @@ struct sock {
 		struct sk_buff *head;
 		struct sk_buff *tail;
 		int len;
-		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
@@ -608,10 +607,20 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/*
+ * Take into account size of receive queue and backlog queue
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+	return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
 /* The per-socket spinlock must be held here. */
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+	if (sk_rcvqueues_full(sk, skb))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
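
For context, here is a minimal sketch (not taken from this patch; the
wrapper and the process callback are illustrative) of how a caller such as
a UDP receive handler might combine the early sk_rcvqueues_full() check
with sk_add_backlog() under the socket spinlock:

#include <net/sock.h>

/* Illustrative receive wrapper: deliver directly when the socket is not
 * owned by a user thread, otherwise queue to the backlog; both paths are
 * bounded against sk_rcvbuf via sk_rcvqueues_full(). */
static int example_rcv(struct sock *sk, struct sk_buff *skb,
		       int (*process)(struct sock *sk, struct sk_buff *skb))
{
	int rc = 0;

	/* Cheap early drop: do not even take the lock when the receive
	 * queue plus backlog are already over sk_rcvbuf. */
	if (sk_rcvqueues_full(sk, skb))
		goto drop;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = process(sk, skb);		/* fast path */
	else if (sk_add_backlog(sk, skb)) {	/* -ENOBUFS: backlog full */
		bh_unlock_sock(sk);
		goto drop;
	}
	bh_unlock_sock(sk);
	return rc;

drop:
	kfree_skb(skb);
	return -1;
}

With this patch, the same sk_rcvbuf budget covers the receive queue and the
backlog together: packets are dropped before the lock when the combined
size is already over the limit, and sk_add_backlog() rejects further skbs
with -ENOBUFS.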