author		Thomas Graf <tgraf@suug.ch>	2015-02-04 20:03:36 -0500
committer	David S. Miller <davem@davemloft.net>	2015-02-06 18:18:35 -0500
commit		cf52d52f9ccb9966ac019d9f79824195583e3e6c (patch)
tree		709001bcb9373ca4d622f70d4c48095aea4d1a61 /lib
parent		7cd10db8de2b6a32ccabef2e0e01c7444faa49d4 (diff)
rhashtable: Avoid bucket cross reference after removal
During a resize, when two buckets in the larger table map to a single
bucket in the smaller table and the new table has already been
(partially) linked to the old table, removal of an element may leave
the bucket in the larger table pointing to entries which all hash to a
value other than the bucket index, causing two buckets to point to the
same sub-chain after unzipping. This is legal *during* the resize phase
but not after it has completed.

Keep the old table around until all of the unzipping is done, so that
the removal code can limit its search to entries with a matching hash
during this special period.

Reported-by: Ying Xue <ying.xue@windriver.com>
Fixes: 97defe1ecf86 ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
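As a concrete illustration of the hazard and the fix, here is a minimal
userspace sketch. It is not the kernel code: struct node, remove_fixed()
and the two-bucket layout are illustrative assumptions, with NULL
standing in for the kernel's nulls markers. One chain holds entries of
two buckets of the larger table interleaved mid-resize; the fixed
removal relinks the predecessor only to the next entry whose hash
matches the bucket, as the patched rhashtable_remove() does when
new_tbl != tbl.

/* Toy model: chain A(h0) -> B(h1) -> C(h0) -> D(h1) interleaves
 * buckets 0 and 1 of the larger table. Naively splicing out A with
 * *pprev = A->next would make bucket 0 head at B, an entry of
 * bucket 1. All identifiers below are hypothetical.
 */
#include <stdio.h>

struct node {
	unsigned int hash;	/* bucket index in the larger table */
	int key;
	struct node *next;
};

/* Remove victim from the chain, but relink only to the next entry
 * hashing to bucket_hash; terminate the sub-chain if none remains.
 */
static void remove_fixed(struct node **head, struct node *victim,
			 unsigned int bucket_hash)
{
	struct node **pprev = head;
	struct node *he, *he2;

	for (he = *head; he; pprev = &he->next, he = he->next) {
		if (he != victim)
			continue;

		/* Skip entries belonging to the other interleaved
		 * sub-chain instead of blindly taking he->next.
		 */
		for (he2 = he->next; he2; he2 = he2->next) {
			if (he2->hash == bucket_hash) {
				*pprev = he2;
				return;
			}
		}
		*pprev = NULL;	/* no same-bucket successor left */
		return;
	}
}

int main(void)
{
	struct node d = { 1, 4, NULL };
	struct node c = { 0, 3, &d };
	struct node b = { 1, 2, &c };
	struct node a = { 0, 1, &b };
	struct node *bucket0 = &a;

	remove_fixed(&bucket0, &a, 0);
	/* Prints key 3 (hash 0): C, not B, becomes the new head. */
	printf("bucket0 now heads at key %d (hash %u)\n",
	       bucket0->key, bucket0->hash);
	return 0;
}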
Diffstat (limited to 'lib')
-rw-r--r--	lib/rhashtable.c	26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ef0816b6be82..5919d63f58e4 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -415,12 +415,6 @@ int rhashtable_expand(struct rhashtable *ht)
 		unlock_buckets(new_tbl, old_tbl, new_hash);
 	}
 
-	/* Publish the new table pointer. Lookups may now traverse
-	 * the new table, but they will not benefit from any
-	 * additional efficiency until later steps unzip the buckets.
-	 */
-	rcu_assign_pointer(ht->tbl, new_tbl);
-
 	/* Unzip interleaved hash chains */
 	while (!complete && !ht->being_destroyed) {
 		/* Wait for readers. All new readers will see the new
@@ -445,6 +439,7 @@ int rhashtable_expand(struct rhashtable *ht)
 		}
 	}
 
+	rcu_assign_pointer(ht->tbl, new_tbl);
 	synchronize_rcu();
 
 	bucket_table_free(old_tbl);
@@ -627,14 +622,14 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl, *new_tbl, *old_tbl;
 	struct rhash_head __rcu **pprev;
-	struct rhash_head *he;
+	struct rhash_head *he, *he2;
 	unsigned int hash, new_hash;
 	bool ret = false;
 
 	rcu_read_lock();
 	tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
 	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	new_hash = head_hashfn(ht, new_tbl, obj);
+	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(new_tbl, old_tbl, new_hash);
 restart:
@@ -647,8 +642,21 @@ restart:
 		}
 
 		ASSERT_BUCKET_LOCK(ht, tbl, hash);
-		rcu_assign_pointer(*pprev, obj->next);
 
+		if (unlikely(new_tbl != tbl)) {
+			rht_for_each_continue(he2, he->next, tbl, hash) {
+				if (head_hashfn(ht, tbl, he2) == hash) {
+					rcu_assign_pointer(*pprev, he2);
+					goto found;
+				}
+			}
+
+			INIT_RHT_NULLS_HEAD(*pprev, ht, hash);
+		} else {
+			rcu_assign_pointer(*pprev, obj->next);
+		}
+
+found:
 		ret = true;
 		break;
 	}