Diffstat (limited to 'net')
-rw-r--r--   net/core/sock.c   16
1 files changed, 15 insertions, 1 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index a96ea7dd0fc1..ed2afdb9ea2d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -385,7 +385,21 @@ set_sndbuf:
 			val = sysctl_rmem_max;
 set_rcvbuf:
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-		/* FIXME: is this lower bound the right one? */
+		/*
+		 * We double it on the way in to account for
+		 * "struct sk_buff" etc. overhead. Applications
+		 * assume that the SO_RCVBUF setting they make will
+		 * allow that much actual data to be received on that
+		 * socket.
+		 *
+		 * Applications are unaware that "struct sk_buff" and
+		 * other overheads allocate from the receive buffer
+		 * during socket buffer allocation.
+		 *
+		 * And after considering the possible alternatives,
+		 * returning the value we actually used in getsockopt
+		 * is the most desirable behavior.
+		 */
 		if ((val * 2) < SOCK_MIN_RCVBUF)
 			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
 		else
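The behavior the new comment documents is visible from userspace: the value
passed to setsockopt(SO_RCVBUF) is doubled by the kernel, and getsockopt()
reports the doubled value actually in use. The sketch below (not part of this
commit) illustrates that; the specific request size is illustrative, and the
effective value is subject to clamping by net.core.rmem_max and SOCK_MIN_RCVBUF.

/*
 * Userspace sketch: observe the SO_RCVBUF doubling described in the
 * comment added by this patch. Not part of the kernel change itself.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int requested = 65536;          /* illustrative request size */
	int effective = 0;
	socklen_t len = sizeof(effective);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &requested, sizeof(requested));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &effective, &len);

	/* Typically prints requested=65536 effective=131072, i.e. doubled,
	 * unless clamped by net.core.rmem_max or SOCK_MIN_RCVBUF. */
	printf("requested=%d effective=%d\n", requested, effective);

	close(fd);
	return 0;
}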