diff options
author | Herbert Xu <herbert@gondor.apana.org.au> | 2015-12-04 09:39:56 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-12-04 16:53:05 -0500 |
commit | d3716f18a7d841565c930efde30737a3557eee69 (patch) | |
tree | 5f0599a1e442a490a1d87e610110f9245825bc2d /lib | |
parent | 6a61d4dbf4f54b5683e0f1e58d873cecca7cb977 (diff) |
rhashtable: Use __vmalloc with GFP_ATOMIC for table allocation
When an rhashtable user pounds rhashtable hard with back-to-back
insertions we may end up growing the table in GFP_ATOMIC context.
Unfortunately when the table reaches a certain size this often
fails because we don't have enough physically contiguous pages
to hold the new table.
Eric Dumazet suggested (and in fact wrote this patch) using
__vmalloc instead, which can be used in GFP_ATOMIC context.
Reported-by: Phil Sutter <phil@nwl.cc>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/rhashtable.c | 5 |
1 file changed, 3 insertions, 2 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 2ff7ed91663a..1c624db90e88 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -120,8 +120,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
120 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) || | 120 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) || |
121 | gfp != GFP_KERNEL) | 121 | gfp != GFP_KERNEL) |
122 | tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); | 122 | tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); |
123 | if (tbl == NULL && gfp == GFP_KERNEL) | 123 | if (tbl == NULL) |
124 | tbl = vzalloc(size); | 124 | tbl = __vmalloc(size, gfp | __GFP_HIGHMEM | __GFP_ZERO, |
125 | PAGE_KERNEL); | ||
125 | if (tbl == NULL) | 126 | if (tbl == NULL) |
126 | return NULL; | 127 | return NULL; |
127 | 128 | ||