Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c | 30
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c5aca0bb116a..019243408623 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -277,8 +277,7 @@
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
-atomic_t tcp_orphan_count = ATOMIC_INIT(0);
-
+struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
 int sysctl_tcp_mem[3] __read_mostly;
@@ -290,9 +289,12 @@ EXPORT_SYMBOL(sysctl_tcp_rmem);
290EXPORT_SYMBOL(sysctl_tcp_wmem); 289EXPORT_SYMBOL(sysctl_tcp_wmem);
291 290
292atomic_t tcp_memory_allocated; /* Current allocated memory. */ 291atomic_t tcp_memory_allocated; /* Current allocated memory. */
293atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
294
295EXPORT_SYMBOL(tcp_memory_allocated); 292EXPORT_SYMBOL(tcp_memory_allocated);
293
294/*
295 * Current number of TCP sockets.
296 */
297struct percpu_counter tcp_sockets_allocated;
296EXPORT_SYMBOL(tcp_sockets_allocated); 298EXPORT_SYMBOL(tcp_sockets_allocated);
297 299
298/* 300/*
@@ -1680,7 +1682,7 @@ void tcp_set_state(struct sock *sk, int state)
 		inet_put_port(sk);
 		/* fall through */
 	default:
-		if (oldstate==TCP_ESTABLISHED)
+		if (oldstate == TCP_ESTABLISHED)
 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
 	}
 
@@ -1690,7 +1692,7 @@ void tcp_set_state(struct sock *sk, int state)
 	sk->sk_state = state;
 
 #ifdef STATE_TRACE
-	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
 #endif
 }
 EXPORT_SYMBOL_GPL(tcp_set_state);
@@ -1834,7 +1836,7 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	atomic_inc(sk->sk_prot->orphan_count);
+	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/* It is the last release_sock in its life. It will remove backlog. */
 	release_sock(sk);
@@ -1885,9 +1887,11 @@ adjudge_to_death:
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
+		int orphan_count = percpu_counter_read_positive(
+				sk->sk_prot->orphan_count);
+
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk,
-				atomic_read(sk->sk_prot->orphan_count))) {
+		if (tcp_too_many_orphans(sk, orphan_count)) {
 			if (net_ratelimit())
 				printk(KERN_INFO "TCP: too many of orphaned "
 				       "sockets\n");
@@ -2650,7 +2654,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
 
 void tcp_done(struct sock *sk)
 {
-	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
 	tcp_set_state(sk, TCP_CLOSE);
@@ -2685,6 +2689,8 @@ void __init tcp_init(void)
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
 
+	percpu_counter_init(&tcp_sockets_allocated, 0);
+	percpu_counter_init(&tcp_orphan_count, 0);
 	tcp_hashinfo.bind_bucket_cachep =
 		kmem_cache_create("tcp_bind_bucket",
 				  sizeof(struct inet_bind_bucket), 0,
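For context on the init side: a struct percpu_counter owns per-CPU storage, so it must be set up with percpu_counter_init() before first use, as tcp_init() now does; code that can be unloaded would also call percpu_counter_destroy(), which the TCP counters never need since they live for the kernel's lifetime. A minimal lifecycle sketch under those assumptions, using the two-argument percpu_counter_init() seen above and an invented counter name:

#include <linux/percpu_counter.h>

static struct percpu_counter demo_orphans;	/* illustrative only, not from tcp.c */

static int demo_setup(void)
{
	/* allocates the per-CPU slots, starting value 0 */
	int err = percpu_counter_init(&demo_orphans, 0);

	return err;
}

static void demo_orphan_one(void)
{
	percpu_counter_inc(&demo_orphans);	/* cheap per-CPU fast path */
}

static void demo_unorphan_one(void)
{
	percpu_counter_dec(&demo_orphans);
}

static void demo_teardown(void)
{
	percpu_counter_destroy(&demo_orphans);	/* frees the per-CPU slots */
}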
@@ -2707,8 +2713,8 @@ void __init tcp_init(void)
 					thash_entries ? 0 : 512 * 1024);
 	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
 	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
-		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
-		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
+		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
+		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
 	}
 	if (inet_ehash_locks_alloc(&tcp_hashinfo))
 		panic("TCP: failed to alloc ehash_locks");