Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c | 13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..5ba520b544d7 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -30,7 +30,7 @@
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
-#define BUCKET_LOCKS_PER_CPU	128UL
+#define BUCKET_LOCKS_PER_CPU	32UL
 
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
@@ -70,7 +70,7 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 	unsigned int nr_pcpus = num_possible_cpus();
 #endif
 
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
 	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 
 	/* Never allocate more than 0.5 locks per bucket */
@@ -83,6 +83,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 		tbl->locks = vmalloc(size * sizeof(spinlock_t));
 	else
 #endif
+	if (gfp != GFP_KERNEL)
+		gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
 	tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
 				   gfp);
 	if (!tbl->locks)
@@ -321,12 +324,14 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-	unsigned int size;
+	unsigned int nelems = atomic_read(&ht->nelems);
+	unsigned int size = 0;
 	int err;
 
 	ASSERT_RHT_MUTEX(ht);
 
-	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+	if (nelems)
+		size = roundup_pow_of_two(nelems * 3 / 2);
 	if (size < ht->p.min_size)
 		size = ht->p.min_size;
 
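
Note on the rhashtable_shrink() hunk above: the patched code skips roundup_pow_of_two() when the table is empty. Below is a minimal userspace sketch of that size calculation, not the kernel source; the helper name shrink_size() and the simplified roundup loop are illustrative stand-ins. The motivation suggested by the change is that the kernel's non-constant roundup_pow_of_two(n) effectively computes 1UL << fls_long(n - 1), so calling it with n == 0 turns into a shift by the full word width (undefined behaviour); guarding on nelems lets an empty table fall through to the ht->p.min_size clamp instead.

/*
 * Minimal userspace sketch (not the kernel source) of the shrink-size
 * calculation after this patch.  Names below are illustrative only.
 */
#include <stdio.h>

/* simplified stand-in for the kernel's roundup_pow_of_two() */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* mirrors the patched logic: skip the roundup entirely when the table
 * is empty, then clamp the result to the minimum table size */
static unsigned int shrink_size(unsigned int nelems, unsigned int min_size)
{
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < min_size)
		size = min_size;
	return size;
}

int main(void)
{
	printf("%u\n", shrink_size(0, 4));	/* 4: empty table clamps to min_size */
	printf("%u\n", shrink_size(100, 4));	/* 256: 150 rounded up to a power of two */
	return 0;
}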