path: root/include/net/request_sock.h
author		Eric Dumazet <dada1@cosmosbay.com>	2006-11-16 05:30:37 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-03 00:21:44 -0500
commit		72a3effaf633bcae9034b7e176bdbd78d64a71db (patch)
tree		b7a331527f1b15335a358f97809134f35587e57a /include/net/request_sock.h
parent		3c62f75aac7348ee262b1295cfcfeb3473f76815 (diff)
[NET]: Size listen hash tables using backlog hint
We currently allocate a fixed-size hash table (TCP_SYNQ_HSIZE=512 slots) for each LISTEN socket, regardless of various parameters (the listen backlog, for example). On x86_64, this means order-1 allocations (which might fail), even for 'small' sockets expecting few connections. Conversely, a huge server wanting a backlog of 50000 is slowed down a bit because of this fixed limit.

This patch makes the sizing of the listen hash table a dynamic parameter, depending on:
 - the net.core.somaxconn tunable (default is 128)
 - the net.ipv4.tcp_max_syn_backlog tunable (default: 256, 1024 or 128)
 - the backlog value given by the user application (2nd parameter of listen())

For large allocations (bigger than PAGE_SIZE), we use vmalloc() instead of kmalloc().

We still limit memory allocation with the two existing tunables (somaxconn & tcp_max_syn_backlog), so for standard setups this patch actually reduces RAM usage.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
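The sizing idea described above can be sketched roughly as follows: clamp the backlog hint by both tunables, enforce a small floor, and round the result up to a power of two for the hash table size. This is only an illustrative, self-contained stand-in; listen_hash_size(), the floor of 8 entries and the sample tunable values are assumptions for the example, not the code this patch adds elsewhere in the tree.

#include <stdio.h>

/* Round n up to the next power of two (illustrative helper). */
static unsigned int round_up_pow_of_two(unsigned int n)
{
	unsigned int size = 1;

	while (size < n)
		size <<= 1;
	return size;
}

/* Derive a hash table size from the backlog hint and the two tunables. */
static unsigned int listen_hash_size(unsigned int backlog,
				     unsigned int somaxconn,
				     unsigned int max_syn_backlog)
{
	unsigned int nr_table_entries = backlog;

	if (nr_table_entries > somaxconn)
		nr_table_entries = somaxconn;
	if (nr_table_entries > max_syn_backlog)
		nr_table_entries = max_syn_backlog;
	if (nr_table_entries < 8)		/* assumed minimum, kept small */
		nr_table_entries = 8;
	return round_up_pow_of_two(nr_table_entries + 1);
}

int main(void)
{
	/* small listener: listen(fd, 16) with default tunables -> 32 slots */
	printf("%u\n", listen_hash_size(16, 128, 256));
	/* big server: listen(fd, 50000) with raised tunables -> 65536 slots */
	printf("%u\n", listen_hash_size(50000, 65536, 65536));
	return 0;
}

With default tunables a small listener gets a table far below the old fixed 512 slots, while a server that raises somaxconn and tcp_max_syn_backlog and passes a large backlog gets a proportionally larger table.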
Diffstat (limited to 'include/net/request_sock.h')
-rw-r--r--	include/net/request_sock.h	8
1 file changed, 4 insertions, 4 deletions
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index f743a941a4f2..b5b023e79e5f 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -28,8 +28,8 @@ struct proto;
 
 struct request_sock_ops {
 	int		family;
-	kmem_cache_t	*slab;
 	int		obj_size;
+	kmem_cache_t	*slab;
 	int		(*rtx_syn_ack)(struct sock *sk,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
@@ -51,13 +51,13 @@ struct request_sock {
 	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 	u32				ts_recent;
 	unsigned long			expires;
-	struct request_sock_ops		*rsk_ops;
+	const struct request_sock_ops	*rsk_ops;
 	struct sock			*sk;
 	u32				secid;
 	u32				peer_secid;
 };
 
-static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
 	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
 
@@ -121,7 +121,7 @@ struct request_sock_queue {
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
-			     const int nr_table_entries);
+			     unsigned int nr_table_entries);
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
 {