summaryrefslogtreecommitdiffstats
path: root/lib/rhashtable.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  |  13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 798f01d64ab0..9623be345d9c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
  * @ht:	the hash table to shrink
  *
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ * This function shrinks the hash table to fit, i.e., the smallest
+ * size would not cause it to expand right away automatically.
  *
  * The caller must ensure that no concurrent resizing occurs by holding
  * ht->mutex.
@@ -276,10 +276,17 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
 
 	ASSERT_RHT_MUTEX(ht);
 
-	new_tbl = bucket_table_alloc(ht, old_tbl->size / 2);
+	if (size < ht->p.min_size)
+		size = ht->p.min_size;
+
+	if (old_tbl->size <= size)
+		return 0;
+
+	new_tbl = bucket_table_alloc(ht, size);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 