author		Herbert Xu <herbert@gondor.apana.org.au>	2015-03-15 06:12:05 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-15 22:22:08 -0400
commit		565e86404e4c40e03f602ef0d6d490328f28c493
tree		6be3f18d0d0311eb84062e18d30b771b14bb9f9e /lib
parent		963ecbd41a1026d99ec7537c050867428c397b89
rhashtable: Fix rhashtable_remove failures
The commit 9d901bc05153bbf33b5da2cd6266865e531f0545 ("rhashtable: Free
bucket tables asynchronously after rehash") causes gratuitous failures
in rhashtable_remove.

The reason is that it inadvertently introduced multiple rehashing from
the perspective of readers.  IOW it is now possible to see more than
two tables during a single RCU critical section.

Fortunately the other reader, rhashtable_lookup, already deals with
this correctly thanks to c4db8848af6af92f90462258603be844baeab44d
("rhashtable: Move future_tbl into struct bucket_table"), so only
rhashtable_remove is broken by this change.

This patch fixes this by looping over every table, from the first to
the last, stopping as soon as we find the element we were trying to
delete.

Incidentally, the simple test for detecting rehashing to prevent
starting another shrink no longer works.  Since it isn't needed anyway
(the work queue and the mutex serve as a natural barrier to unnecessary
rehashes), I've simply killed the test.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
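To make the fixed control flow concrete, below is a minimal user-space
sketch of the same first-to-last walk.  Everything in it is a
hypothetical stand-in, not kernel code: toy_table plays the role of
bucket_table with its future_tbl successor pointer, toy_remove_one
stands in for __rhashtable_remove(), and RCU and bucket locking are
deliberately omitted so the sketch stays self-contained.

/* Toy illustration of the fix: while a rehash is in flight, each
 * table may have a successor ("future_tbl"), so a remove must try
 * every table in the chain, not just the first one and its immediate
 * successor.  Plain pointers here; the real code walks the chain
 * under rcu_read_lock() with rht_dereference_rcu().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_table {
	struct toy_table *future_tbl;	/* next table during a rehash */
	int *slots;			/* stand-in bucket storage */
	size_t nslots;
};

/* Hypothetical per-table remove; stands in for __rhashtable_remove(). */
static bool toy_remove_one(struct toy_table *tbl, int key)
{
	for (size_t i = 0; i < tbl->nslots; i++) {
		if (tbl->slots[i] == key) {
			tbl->slots[i] = -1;	/* mark the slot empty */
			return true;
		}
	}
	return false;
}

/* Mirrors the patched rhashtable_remove(): keep trying successive
 * tables until the element is found or the chain ends.
 */
static bool toy_remove(struct toy_table *first, int key)
{
	struct toy_table *tbl = first;
	bool ret;

	while (!(ret = toy_remove_one(tbl, key)) &&
	       (tbl = tbl->future_tbl))
		;

	return ret;
}

int main(void)
{
	int a[] = { 1, 2, 3 }, b[] = { 4, 5, 6 }, c[] = { 7, 8, 9 };
	struct toy_table t3 = { NULL, c, 3 };
	struct toy_table t2 = { &t3, b, 3 };
	struct toy_table t1 = { &t2, a, 3 };

	/* 8 lives in the third table: code that only checks the first
	 * table and its immediate successor would miss it.
	 */
	printf("remove 8: %s\n", toy_remove(&t1, 8) ? "found" : "missed");
	printf("remove 42: %s\n", toy_remove(&t1, 42) ? "found" : "missed");
	return 0;
}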
Diffstat (limited to 'lib')
-rw-r--r--	lib/rhashtable.c	17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b916679b3e3b..c523d3a563aa 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -511,28 +511,25 @@ static bool __rhashtable_remove(struct rhashtable *ht,
  */
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
-	struct bucket_table *tbl, *old_tbl;
+	struct bucket_table *tbl;
 	bool ret;
 
 	rcu_read_lock();
 
-	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	ret = __rhashtable_remove(ht, old_tbl, obj);
+	tbl = rht_dereference_rcu(ht->tbl, ht);
 
 	/* Because we have already taken (and released) the bucket
 	 * lock in old_tbl, if we find that future_tbl is not yet
 	 * visible then that guarantees the entry to still be in
-	 * old_tbl if it exists.
+	 * the old tbl if it exists.
 	 */
-	tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
-	if (!ret && old_tbl != tbl)
-		ret = __rhashtable_remove(ht, tbl, obj);
+	while (!(ret = __rhashtable_remove(ht, tbl, obj)) &&
+	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+		;
 
 	if (ret) {
-		bool no_resize_running = tbl == old_tbl;
-
 		atomic_dec(&ht->nelems);
-		if (no_resize_running && rht_shrink_below_30(ht, tbl))
+		if (rht_shrink_below_30(ht, tbl))
 			schedule_work(&ht->run_work);
 	}
 
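Note the design choice in the new loop: its body is empty because the
condition does all the work.  Each iteration first attempts the removal
in the current table via __rhashtable_remove(), and only if that fails
advances tbl to the next future_tbl.  The loop therefore terminates
either when the removal succeeds (ret is true) or when the chain is
exhausted (tbl becomes NULL), which is exactly the first-to-last
traversal described in the commit message and mirrored by the toy
sketch above.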