author    Wu Fengguang <fengguang.wu@intel.com>   2009-09-03 02:45:45 -0400
committer David S. Miller <davem@davemloft.net>   2009-09-03 02:45:45 -0400
commit    aa1330766c49199bdab4d4a9096d98b072df9044
tree      98787478dbef0faa7caee09c4996abcda723a608 /net/ipv4/tcp.c
parent    05c6a8d7a7d778f26d8eb821556988993b766092
tcp: replace hard coded GFP_KERNEL with sk_allocation
This fixes a lockdep warning that appeared when doing stress
memory tests over NFS:
inconsistent {RECLAIM_FS-ON-W} -> {IN-RECLAIM_FS-W} usage.
page reclaim => nfs_writepage => tcp_sendmsg => lock sk_lock
mount_root => nfs_root_data => tcp_close => lock sk_lock =>
tcp_send_fin => alloc_skb_fclone => page reclaim
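The inversion is that tcp_close() allocates the FIN skb with GFP_KERNEL
while holding sk_lock, and GFP_KERNEL may enter page reclaim, which (via
nfs_writepage => tcp_sendmsg) needs the same sk_lock. Using
sk->sk_allocation instead lets a reclaim-sensitive user mark its socket
so these allocations stay out of reclaim. A minimal sketch of such a
setup, assuming a hypothetical helper (the sunrpc transport does the
equivalent in net/sunrpc/xprtsock.c):

	#include <net/sock.h>

	/* Hypothetical helper: sk_allocation defaults to GFP_KERNEL;
	 * overriding it to GFP_ATOMIC keeps allocations made under
	 * sk_lock from recursing into page reclaim.
	 */
	static void mark_sock_noreclaim(struct sock *sk)
	{
		sk->sk_allocation = GFP_ATOMIC;
	}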
David raised a concern that if the allocation fails in tcp_send_fin(), and it's
GFP_ATOMIC, we are going to yield() (which sleeps) and loop endlessly waiting
for the allocation to succeed.
But the fact is, the original GFP_KERNEL also sleeps. GFP_ATOMIC+yield() looks
weird, but it is no worse than the implicit sleep inside GFP_KERNEL. Both could
loop endlessly under memory pressure.
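For reference, the loop in question looks roughly like this (abridged
from tcp_send_fin() in net/ipv4/tcp_output.c; not part of this diff):

	/* Socket is locked, keep trying until memory is available.
	 * With sk_allocation == GFP_ATOMIC the allocation itself no
	 * longer sleeps, so yield() supplies the back-off instead.
	 */
	for (;;) {
		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
		if (skb)
			break;
		yield();
	}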
CC: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
CC: David S. Miller <davem@davemloft.net>
CC: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
 net/ipv4/tcp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59f69a6c5863..edeea060db44 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1839,7 +1839,7 @@ void tcp_close(struct sock *sk, long timeout)
 		/* Unread data was tossed, zap the connection. */
 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
-		tcp_send_active_reset(sk, GFP_KERNEL);
+		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
@@ -2658,7 +2658,7 @@ void tcp_free_md5sig_pool(void)
 
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	int cpu;
 	struct tcp_md5sig_pool **pool;
@@ -2671,7 +2671,7 @@ static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
 		struct tcp_md5sig_pool *p;
 		struct crypto_hash *hash;
 
-		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		p = kzalloc(sizeof(*p), sk->sk_allocation);
 		if (!p)
 			goto out_free;
 		*per_cpu_ptr(pool, cpu) = p;
@@ -2688,7 +2688,7 @@ out_free:
 	return NULL;
 }
 
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool **pool;
 	int alloc = 0;
@@ -2709,7 +2709,7 @@ retry:
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
-		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
 		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
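Since tcp_alloc_md5sig_pool() now takes the socket, its callers have to
thread it through; a hedged sketch of an adapted call site (the real
call-site updates live outside this diffstat-limited view):

	/* Allocate the per-CPU MD5 pools for this socket; the allocation
	 * now honours sk->sk_allocation instead of a hard coded GFP_KERNEL.
	 */
	if (tcp_alloc_md5sig_pool(sk) == NULL)
		return -ENOMEM;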