author		Eric Dumazet <edumazet@google.com>	2014-10-23 15:58:58 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-25 16:10:04 -0400
commit		349ce993ac706869d553a1816426d3a4bfda02b1
tree		732254796f8ee7b580db164f2f9732a2e948093e /net/ipv4/tcp.c
parent		4cc40af08032a513e2e68fa6d7818b77179a86af
tcp: md5: do not use alloc_percpu()
percpu tcp_md5sig_pool contains memory blobs that ultimately
go through sg_set_buf():
 -> sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
This requires that the whole area be in a physically contiguous
portion of memory, and that @buf not be backed by vmalloc().
Given that alloc_percpu() can use vmalloc() areas, it does not meet
these requirements.
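For reference, sg_set_buf() is essentially a one-line wrapper (its body in
include/linux/scatterlist.h of this era; the comment is added here for
illustration and is not part of this patch):

static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
			      unsigned int buflen)
{
	/* virt_to_page() is only valid for addresses in the kernel's
	 * linear mapping; on a vmalloc()ed @buf it yields a bogus
	 * struct page (vmalloc addresses need vmalloc_to_page()).
	 */
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}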
Replace alloc_percpu() with a static DEFINE_PER_CPU(): tcp_md5sig_pool
is small anyway, so there is no gain in allocating it dynamically.
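A minimal sketch of the two allocation styles, assuming the usual percpu
layout (the static percpu section is backed by the linear mapping, while
dynamic percpu chunks may be vmalloc()ed); not part of the patch:

/* Dynamic: the backing chunk may come from a vmalloc() area, so a
 * pointer into it must never be passed to virt_to_page(), and hence
 * never to sg_set_buf().
 */
struct tcp_md5sig_pool __percpu *pool;
pool = alloc_percpu(struct tcp_md5sig_pool);

/* Static: placed in the kernel image's percpu section, whose per-CPU
 * copies live in the linear mapping, so sg_set_buf() on memory from
 * this_cpu_ptr(&tcp_md5sig_pool) is safe.
 */
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);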
Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: 765cf9976e93 ("tcp: md5: remove one indirection level in tcp_md5sig_pool")
Reported-by: Crestez Dan Leonard <cdleonard@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	59
1 file changed, 20 insertions(+), 39 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1bec4e76d88c..39ec0c379545 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
-
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
-
-		if (p->md5_desc.tfm)
-			crypto_free_hash(p->md5_desc.tfm);
-	}
-	free_percpu(pool);
-}
+static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
 	int cpu;
-	struct tcp_md5sig_pool __percpu *pool;
-
-	pool = alloc_percpu(struct tcp_md5sig_pool);
-	if (!pool)
-		return;
 
 	for_each_possible_cpu(cpu) {
-		struct crypto_hash *hash;
+		if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
+			struct crypto_hash *hash;
 
-		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-		if (IS_ERR_OR_NULL(hash))
-			goto out_free;
-
-		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
+			hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+			if (IS_ERR_OR_NULL(hash))
+				return;
+			per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
+		}
 	}
-	/* before setting tcp_md5sig_pool, we must commit all writes
-	 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+	/* before setting tcp_md5sig_pool_populated, we must commit all writes
+	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
 	 */
 	smp_wmb();
-	tcp_md5sig_pool = pool;
-	return;
-out_free:
-	__tcp_free_md5sig_pool(pool);
+	tcp_md5sig_pool_populated = true;
 }
 
 bool tcp_alloc_md5sig_pool(void)
 {
-	if (unlikely(!tcp_md5sig_pool)) {
+	if (unlikely(!tcp_md5sig_pool_populated)) {
 		mutex_lock(&tcp_md5sig_mutex);
 
-		if (!tcp_md5sig_pool)
+		if (!tcp_md5sig_pool_populated)
 			__tcp_alloc_md5sig_pool();
 
 		mutex_unlock(&tcp_md5sig_mutex);
 	}
-	return tcp_md5sig_pool != NULL;
+	return tcp_md5sig_pool_populated;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
@@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool __percpu *p;
-
 	local_bh_disable();
-	p = ACCESS_ONCE(tcp_md5sig_pool);
-	if (p)
-		return raw_cpu_ptr(p);
 
+	if (tcp_md5sig_pool_populated) {
+		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+		smp_rmb();
+		return this_cpu_ptr(&tcp_md5sig_pool);
+	}
 	local_bh_enable();
 	return NULL;
 }
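Distilled from the hunks above: the patch keeps the old code's
locked-init/lockless-read scheme but publishes a boolean instead of a
pointer. A sketch of the resulting barrier pairing (not the literal
patched code):

/* Writer, serialized by tcp_md5sig_mutex: */
per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;	/* init payload */
smp_wmb();			/* order payload stores before the flag store */
tcp_md5sig_pool_populated = true;	/* publish */

/* Reader, running with BH disabled: */
if (tcp_md5sig_pool_populated) {
	smp_rmb();		/* order the flag load before payload loads */
	/* this_cpu_ptr(&tcp_md5sig_pool) is now safe to use */
}

Note also that on success tcp_get_md5sig_pool() deliberately returns with
BH still disabled; the caller re-enables it via tcp_put_md5sig_pool()
once it is done with the pool.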