author	Herbert Xu <herbert@gondor.apana.org.au>	2015-03-23 09:50:26 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-23 22:07:52 -0400
commit	b824478b2145be78ac19e1cf44e2b9036c7a9608 (patch)
tree	19c7d4ad479fd987eef5fd9b6063df27fd26012a /include/linux/rhashtable.h
parent	18093d1c0d1e032142ee24825678b0a8977d74ba (diff)
rhashtable: Add multiple rehash support
This patch adds the missing bits to allow multiple rehashes.  The
read-side as well as remove already handle this correctly.  So it's
only the rehasher and insertion that need modification to handle
this.

Note that this patch doesn't actually enable it so for now rehashing
is still only performed by the worker thread.

This patch also disables the explicit expand/shrink interface because
the table is meant to expand and shrink automatically, and continuing
to export these interfaces unnecessarily complicates the life of the
rehasher since the rehash process is now composed of two parts.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--	include/linux/rhashtable.h	26
1 file changed, 14 insertions(+), 12 deletions(-)
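
A short orientation on the mechanism before the diff: each bucket_table carries a rehash counter recording how many of its buckets have already been moved to tbl->future_tbl. A bucket whose index is below that counter is now owned by a newer table, so an insertion must walk the future_tbl chain until it reaches the oldest table that still owns the hashed bucket, and take that bucket's lock there. The sketch below restates the loop this patch adds to __rhashtable_insert_fast() as a standalone, annotated helper; the helper itself is hypothetical and exists only for illustration, not in rhashtable.h.

	/* Hypothetical helper, shown only to annotate the loop this patch
	 * adds to __rhashtable_insert_fast(); it does not exist in
	 * rhashtable.h.  Caller must hold rcu_read_lock().  Returns the
	 * oldest table whose bucket for @obj has not been rehashed yet,
	 * with that bucket's lock held.
	 */
	static inline struct bucket_table *lock_unrehashed_bucket(
		struct rhashtable *ht, struct rhash_head *obj,
		const struct rhashtable_params params,
		spinlock_t **lockp, unsigned int *hashp)
	{
		struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
		spinlock_t *lock;
		unsigned int hash;

		for (;;) {
			hash = rht_head_hashfn(ht, tbl, obj, params);
			lock = rht_bucket_lock(tbl, hash);
			spin_lock_bh(lock);

			/* Buckets below tbl->rehash were already moved to
			 * tbl->future_tbl, so this table no longer owns them;
			 * buckets at or above tbl->rehash are still here.
			 */
			if (tbl->rehash <= hash)
				break;

			/* Drop this table's lock and retry in the next one. */
			spin_unlock_bh(lock);
			tbl = rht_dereference_rcu(tbl->future_tbl, ht);
		}

		*lockp = lock;
		*hashp = hash;
		return tbl;
	}
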
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index bc2488b98321..e8ffcdb5e239 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -308,9 +308,6 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 			   struct rhash_head *obj,
 			   struct bucket_table *old_tbl);
 
-int rhashtable_expand(struct rhashtable *ht);
-int rhashtable_shrink(struct rhashtable *ht);
-
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
@@ -541,17 +538,22 @@ static inline int __rhashtable_insert_fast(
 	rcu_read_lock();
 
 	tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = rht_head_hashfn(ht, tbl, obj, params);
-	lock = rht_bucket_lock(tbl, hash);
-
-	spin_lock_bh(lock);
 
-	/* Because we have already taken the bucket lock in tbl,
-	 * if we find that future_tbl is not yet visible then
-	 * that guarantees all other insertions of the same entry
-	 * will also grab the bucket lock in tbl because until
-	 * the rehash completes ht->tbl won't be changed.
+	/* All insertions must grab the oldest table containing
+	 * the hashed bucket that is yet to be rehashed.
 	 */
+	for (;;) {
+		hash = rht_head_hashfn(ht, tbl, obj, params);
+		lock = rht_bucket_lock(tbl, hash);
+		spin_lock_bh(lock);
+
+		if (tbl->rehash <= hash)
+			break;
+
+		spin_unlock_bh(lock);
+		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	}
+
 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (unlikely(new_tbl)) {
 		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
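
With rhashtable_expand() and rhashtable_shrink() no longer exported, users only describe their objects and insert/lookup/remove; all resizing is left to the rehash worker. The sketch below shows what that usage looks like, assuming the inline fast interface declared earlier in this header (rhashtable_init(), rhashtable_insert_fast(), rhashtable_lookup_fast(), rhashtable_remove_fast(), rhashtable_destroy()); struct my_obj, my_params and example() are illustrative names only, and the parameter set is simplified.

	#include <linux/rhashtable.h>
	#include <linux/jhash.h>

	/* Illustrative object keyed by a u32 id; the table expands and
	 * shrinks on its own, there is no explicit resize call to make.
	 */
	struct my_obj {
		u32 id;				/* lookup key */
		struct rhash_head node;		/* hash table linkage */
	};

	static const struct rhashtable_params my_params = {
		.key_len	= sizeof(u32),
		.key_offset	= offsetof(struct my_obj, id),
		.head_offset	= offsetof(struct my_obj, node),
		.hashfn		= jhash,
	};

	static int example(struct rhashtable *ht, struct my_obj *obj)
	{
		struct my_obj *found;
		int err;

		err = rhashtable_init(ht, &my_params);
		if (err)
			return err;

		/* Insertion may schedule a deferred expand via the worker. */
		err = rhashtable_insert_fast(ht, &obj->node, my_params);
		if (!err) {
			/* The result is only stable under the caller's own
			 * protection (typically RCU); not dereferenced here.
			 */
			found = rhashtable_lookup_fast(ht, &obj->id, my_params);
			if (found != obj)
				err = -ENOENT;
			rhashtable_remove_fast(ht, &obj->node, my_params);
		}

		rhashtable_destroy(ht);
		return err;
	}
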