author    | Herbert Xu <herbert@gondor.apana.org.au> | 2015-03-13 22:57:25 -0400
committer | David S. Miller <davem@davemloft.net>    | 2015-03-15 01:35:34 -0400
commit    | c4db8848af6af92f90462258603be844baeab44d
tree      | 582bc65b98879f06bc3e9f28b9adefdc90314725 /lib
parent    | 63d512d0cffcae40505d9448abd509972465e846
rhashtable: Move future_tbl into struct bucket_table
This patch moves future_tbl to open up the possibility of having
multiple rehashes on the same table.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
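
For orientation, a rough sketch of what the move amounts to is shown below. The actual header change lives in include/linux/rhashtable.h and is outside this lib-only diffstat, so the neighbouring struct members shown here are illustrative only, not the exact layout.

/* Before this patch: the pending table hangs off the hash table itself,
 * so at most one rehash can be in flight at a time.
 */
struct rhashtable {
        struct bucket_table __rcu *tbl;        /* current table */
        struct bucket_table __rcu *future_tbl; /* single pending table */
        /* ... remaining members unchanged ... */
};

/* After this patch: each bucket table carries a pointer to its successor,
 * so tables can chain together and multiple rehashes may overlap.
 */
struct bucket_table {
        struct bucket_table __rcu *future_tbl; /* next table in the chain */
        /* ... size, locks, walkers, etc. ... */
        struct rhash_head __rcu *buckets[];
};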
Diffstat (limited to 'lib')
-rw-r--r-- | lib/rhashtable.c | 27
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ff4ea1704546..9d53a46dcca9 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -207,8 +207,9 @@ static bool rht_shrink_below_30(const struct rhashtable *ht,
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 {
-        struct bucket_table *new_tbl = rht_dereference(ht->future_tbl, ht);
         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+        struct bucket_table *new_tbl =
+                rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
         struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
         int err = -ENOENT;
         struct rhash_head *head, *next, *entry;
@@ -273,10 +274,8 @@ static void rhashtable_rehash(struct rhashtable *ht,
 
         /* Make insertions go into the new, empty table right away. Deletions
          * and lookups will be attempted in both tables until we synchronize.
-         * The synchronize_rcu() guarantees for the new table to be picked up
-         * so no new additions go into the old table while we relink.
          */
-        rcu_assign_pointer(ht->future_tbl, new_tbl);
+        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
 
         /* Ensure the new table is visible to readers. */
         smp_wmb();
@@ -400,7 +399,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
          * also grab the bucket lock in old_tbl because until the
          * rehash completes ht->tbl won't be changed.
          */
-        tbl = rht_dereference_rcu(ht->future_tbl, ht);
+        tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
         if (tbl != old_tbl) {
                 hash = head_hashfn(ht, tbl, obj);
                 spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
@@ -525,7 +524,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
          * visible then that guarantees the entry to still be in
          * old_tbl if it exists.
          */
-        tbl = rht_dereference_rcu(ht->future_tbl, ht);
+        tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
         if (!ret && old_tbl != tbl)
                 ret = __rhashtable_remove(ht, tbl, obj);
 
@@ -599,7 +598,7 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup);
 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                 bool (*compare)(void *, void *), void *arg)
 {
-        const struct bucket_table *tbl, *old_tbl;
+        const struct bucket_table *tbl;
         struct rhash_head *he;
         u32 hash;
 
@@ -618,9 +617,8 @@ restart:
         /* Ensure we see any new tables. */
         smp_rmb();
 
-        old_tbl = tbl;
-        tbl = rht_dereference_rcu(ht->future_tbl, ht);
-        if (unlikely(tbl != old_tbl))
+        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+        if (unlikely(tbl))
                 goto restart;
         rcu_read_unlock();
 
@@ -830,14 +828,13 @@ next:
                 iter->skip = 0;
         }
 
-        iter->walker->tbl = rht_dereference_rcu(ht->future_tbl, ht);
-        if (iter->walker->tbl != tbl) {
+        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+        if (iter->walker->tbl) {
                 iter->slot = 0;
                 iter->skip = 0;
                 return ERR_PTR(-EAGAIN);
         }
 
-        iter->walker->tbl = NULL;
         iter->p = NULL;
 
 out:
@@ -865,8 +862,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
         ht = iter->ht;
 
         mutex_lock(&ht->mutex);
-        if (rht_dereference(ht->tbl, ht) == tbl ||
-            rht_dereference(ht->future_tbl, ht) == tbl)
+        if (tbl->rehash < tbl->size)
                 list_add(&iter->walker->list, &tbl->walkers);
         else
                 iter->walker->tbl = NULL;
@@ -961,7 +957,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
         atomic_set(&ht->nelems, 0);
 
         RCU_INIT_POINTER(ht->tbl, tbl);
-        RCU_INIT_POINTER(ht->future_tbl, tbl);
 
         INIT_WORK(&ht->run_work, rht_deferred_worker);
 
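
With future_tbl chained off each bucket_table, a reader that races with one or more rehashes simply follows the chain from whatever table it started on, falling back to the current table when no rehash is pending (the `?: old_tbl` idiom above). A minimal sketch of that traversal, using a hypothetical helper rather than any function added by this patch:

/* Illustrative only: find the newest table by walking the per-table
 * future_tbl links.  After this patch, several pending rehashes just
 * form a longer chain, and future_tbl of the newest table stays NULL.
 */
static struct bucket_table *newest_table(struct rhashtable *ht,
                                         struct bucket_table *tbl)
{
        struct bucket_table *next;

        while ((next = rht_dereference_rcu(tbl->future_tbl, ht)))
                tbl = next;

        return tbl;
}

The walker change follows the same theme: instead of comparing against ht->tbl and ht->future_tbl, rhashtable_walk_stop keeps the walker on the table's list only while that table is still being rehashed (tbl->rehash < tbl->size).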