about summary refs log tree commit diff stats
path: root/net/ipv4/tcp.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 38 ++++++++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bcdec3ad772..22ef5f9fd2ff 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
1876} 1876}
1877EXPORT_SYMBOL(tcp_shutdown); 1877EXPORT_SYMBOL(tcp_shutdown);
1878 1878
1879bool tcp_check_oom(struct sock *sk, int shift)
1880{
1881 bool too_many_orphans, out_of_socket_memory;
1882
1883 too_many_orphans = tcp_too_many_orphans(sk, shift);
1884 out_of_socket_memory = tcp_out_of_memory(sk);
1885
1886 if (too_many_orphans && net_ratelimit())
1887 pr_info("TCP: too many orphaned sockets\n");
1888 if (out_of_socket_memory && net_ratelimit())
1889 pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
1890 return too_many_orphans || out_of_socket_memory;
1891}
1892
1879void tcp_close(struct sock *sk, long timeout) 1893void tcp_close(struct sock *sk, long timeout)
1880{ 1894{
1881 struct sk_buff *skb; 1895 struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
2015 } 2029 }
2016 if (sk->sk_state != TCP_CLOSE) { 2030 if (sk->sk_state != TCP_CLOSE) {
2017 sk_mem_reclaim(sk); 2031 sk_mem_reclaim(sk);
2018 if (tcp_too_many_orphans(sk, 0)) { 2032 if (tcp_check_oom(sk, 0)) {
2019 if (net_ratelimit())
2020 printk(KERN_INFO "TCP: too many of orphaned "
2021 "sockets\n");
2022 tcp_set_state(sk, TCP_CLOSE); 2033 tcp_set_state(sk, TCP_CLOSE);
2023 tcp_send_active_reset(sk, GFP_ATOMIC); 2034 tcp_send_active_reset(sk, GFP_ATOMIC);
2024 NET_INC_STATS_BH(sock_net(sk), 2035 NET_INC_STATS_BH(sock_net(sk),
@@ -3216,11 +3227,21 @@ static int __init set_thash_entries(char *str)
3216} 3227}
3217__setup("thash_entries=", set_thash_entries); 3228__setup("thash_entries=", set_thash_entries);
3218 3229
3230void tcp_init_mem(struct net *net)
3231{
3232 unsigned long limit = nr_free_buffer_pages() / 8;
3233 limit = max(limit, 128UL);
3234 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
3235 net->ipv4.sysctl_tcp_mem[1] = limit;
3236 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
3237}
3238
3219void __init tcp_init(void) 3239void __init tcp_init(void)
3220{ 3240{
3221 struct sk_buff *skb = NULL; 3241 struct sk_buff *skb = NULL;
3222 unsigned long limit; 3242 unsigned long limit;
3223 int i, max_share, cnt; 3243 int max_share, cnt;
3244 unsigned int i;
3224 unsigned long jiffy = jiffies; 3245 unsigned long jiffy = jiffies;
3225 3246
3226 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3247 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3263,7 +3284,7 @@ void __init tcp_init(void)
3263 &tcp_hashinfo.bhash_size, 3284 &tcp_hashinfo.bhash_size,
3264 NULL, 3285 NULL,
3265 64 * 1024); 3286 64 * 1024);
3266 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; 3287 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3267 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3288 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3268 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 3289 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3269 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 3290 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
@@ -3276,9 +3297,10 @@ void __init tcp_init(void)
3276 sysctl_tcp_max_orphans = cnt / 2; 3297 sysctl_tcp_max_orphans = cnt / 2;
3277 sysctl_max_syn_backlog = max(128, cnt / 256); 3298 sysctl_max_syn_backlog = max(128, cnt / 256);
3278 3299
3300 tcp_init_mem(&init_net);
3279 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3301 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3280 limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1]) 3302 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
3281 << (PAGE_SHIFT - 7); 3303 limit = max(limit, 128UL);
3282 max_share = min(4UL*1024*1024, limit); 3304 max_share = min(4UL*1024*1024, limit);
3283 3305
3284 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3306 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;