path: root/net/core/sock.c
author		Eric Dumazet <eric.dumazet@gmail.com>	2010-04-27 18:13:20 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-27 18:13:20 -0400
commit		c377411f2494a931ff7facdbb3a6839b1266bcf6 (patch)
tree		6846cdcec913f50839e3916856f78f7e059ff5fb /net/core/sock.c
parent		6e7676c1a76aed6e957611d8d7a9e5592e23aeba (diff)
net: sk_add_backlog() take rmem_alloc into account
The current socket backlog limit is not enough to really stop DDoS attacks, because the user thread spends a long time processing a full backlog each round, and other users may spin madly on the socket lock in the meantime.

We should add the backlog size and the receive_queue size (aka rmem_alloc) together to pace writers, and let the user thread run without being slowed down too much.

Introduce a sk_rcvqueues_full() helper, to avoid taking the socket lock in stress situations.

Under huge stress from a multiqueue/RPS enabled NIC, a single-flow UDP receiver can now process ~200.000 pps (instead of ~100 pps before the patch) on an 8-core machine.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
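The sk_rcvqueues_full() helper itself is introduced outside this file (the diffstat below is limited to net/core/sock.c), so it does not appear in the hunks that follow. A minimal sketch of what such a helper could look like, assuming it sums the backlog length and the receive-queue allocation (rmem_alloc) and compares the total against sk_rcvbuf; the exact comparison, including whether skb->truesize is counted, is an assumption based on the description above, not taken from this diff:

/*
 * Sketch only: report whether the socket already holds "too much"
 * receive data, counting both the memory charged to the receive queue
 * (sk_rmem_alloc) and the bytes parked on the backlog. The comparison
 * against sk_rcvbuf and the use of skb->truesize are assumed.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk,
				     const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len +
			     atomic_read(&sk->sk_rmem_alloc);

	return qsize + skb->truesize > sk->sk_rcvbuf;
}

Because such a check reads only per-socket counters, callers like sk_receive_skb() below can drop packets early under load without taking the socket lock first.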
Diffstat (limited to 'net/core/sock.c')
-rw-r--r--	net/core/sock.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 58ebd146ce5a..51041759517e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -327,6 +327,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
 	skb->dev = NULL;
 
+	if (sk_rcvqueues_full(sk, skb)) {
+		atomic_inc(&sk->sk_drops);
+		goto discard_and_relse;
+	}
 	if (nested)
 		bh_lock_sock_nested(sk);
 	else
@@ -1885,7 +1889,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_allocation = GFP_KERNEL;
 	sk->sk_rcvbuf = sysctl_rmem_default;
 	sk->sk_sndbuf = sysctl_wmem_default;
-	sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
 	sk->sk_state = TCP_CLOSE;
 	sk_set_socket(sk, sock);
 
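The second hunk drops the sk->sk_backlog.limit initialization from sock_init_data(): with the new accounting, the backlog is no longer capped by a separate fixed limit but by the combined rmem_alloc + backlog size measured against sk_rcvbuf. The sk_add_backlog() change named in the subject line also lives outside this file; a hedged sketch of how the backlog-queueing path could apply the same check (the -ENOBUFS return value and the __sk_add_backlog() enqueue helper are assumptions, not shown in this diff):

/*
 * Sketch only: refuse to queue onto the backlog once the receive queue
 * plus backlog already exceeds the receive buffer, mirroring the early
 * drop added to sk_receive_skb() above.
 */
static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb))
		return -ENOBUFS;	/* caller accounts the drop */

	__sk_add_backlog(sk, skb);	/* unconditional enqueue (assumed helper) */
	return 0;
}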