summaryrefslogtreecommitdiffstats
path: root/lib/rhashtable.c
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2015-03-23 09:50:25 -0400
committerDavid S. Miller <davem@davemloft.net>2015-03-23 22:07:52 -0400
commit18093d1c0d1e032142ee24825678b0a8977d74ba (patch)
tree2740fc6be545509613d41984d161efdb97f5d4f2 /lib/rhashtable.c
parent6d022949810b1ea82d46a576d6166035720bbb32 (diff)
rhashtable: Shrink to fit
This patch changes rhashtable_shrink to shrink to the smallest size possible rather than halving the table. This is needed because with multiple rehashing we will defer shrinking until all other rehashing is done, meaning that when we do shrink we may be able to shrink a lot. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Acked-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--lib/rhashtable.c13
1 file changed, 10 insertions, 3 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 798f01d64ab0..9623be345d9c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
261 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups 261 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
262 * @ht: the hash table to shrink 262 * @ht: the hash table to shrink
263 * 263 *
264 * This function may only be called in a context where it is safe to call 264 * This function shrinks the hash table to fit, i.e., the smallest
265 * synchronize_rcu(), e.g. not within a rcu_read_lock() section. 265 * size would not cause it to expand right away automatically.
266 * 266 *
267 * The caller must ensure that no concurrent resizing occurs by holding 267 * The caller must ensure that no concurrent resizing occurs by holding
268 * ht->mutex. 268 * ht->mutex.
@@ -276,10 +276,17 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
276int rhashtable_shrink(struct rhashtable *ht) 276int rhashtable_shrink(struct rhashtable *ht)
277{ 277{
278 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 278 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
279 unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
279 280
280 ASSERT_RHT_MUTEX(ht); 281 ASSERT_RHT_MUTEX(ht);
281 282
282 new_tbl = bucket_table_alloc(ht, old_tbl->size / 2); 283 if (size < ht->p.min_size)
284 size = ht->p.min_size;
285
286 if (old_tbl->size <= size)
287 return 0;
288
289 new_tbl = bucket_table_alloc(ht, size);
283 if (new_tbl == NULL) 290 if (new_tbl == NULL)
284 return -ENOMEM; 291 return -ENOMEM;
285 292