author     Thomas Graf <tgraf@suug.ch>            2015-02-06 11:08:43 -0500
committer  David S. Miller <davem@davemloft.net>  2015-02-06 18:19:17 -0500
commit     020219a69d40a205dad12b0ea1e6a46153793368
tree       0dec904a1fcc46dc6758c673afda340bf1da4475 /lib/rhashtable.c
parent     41e8b206f8df6daf90e132a73fc2c60f51a36c6b
rhashtable: Fix remove logic to avoid cross references between buckets
The remove logic properly searched the remaining chain for a matching entry
with an identical hash, but it did this while searching from both the old and
new table. Instead, in order not to leave stale references behind, we need to:

 1. When growing and searching from the new table:
    Search the remaining chain for an entry with the same hash to avoid having
    the new table directly point to an entry with a different hash.

 2. When shrinking and searching from the old table:
    Check whether the element after the removed one would create a cross
    reference and avoid it if so.

These bugs were present from the beginning in nft_hash.

Also, both insert functions calculated the hash based on the mask of the new
table, which worked while growing. While shrinking, the mask of the new table
is smaller than the mask of the old table. This led to a bit not being taken
into account when selecting the bucket lock and thus caused the wrong bucket
to be locked eventually (see the sketch after the sign-offs below).

Fixes: 7e1e77636e36 ("lib: Resizable, Scalable, Concurrent Hash Table")
Fixes: 97defe1ecf86 ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Reported-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
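The masking bug is easiest to see with a little arithmetic. Below is a minimal
sketch, assuming made-up table sizes and an example hash value (plain userspace
C, not the kernel helpers): once the raw hash has been reduced with the smaller
new-table mask, the bit that distinguishes buckets in the larger old table is
already gone.

/*
 * Hypothetical illustration only: shrinking from 8 to 4 buckets, masking
 * with the new (smaller) mask first discards bit 2 of the raw hash.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t raw_hash = 0x2f;              /* example raw hash          */
        uint32_t old_size = 8, new_size = 4;   /* shrinking: 8 -> 4 buckets */

        uint32_t old_bucket = raw_hash & (old_size - 1);   /* 0x2f & 7 = 7 */
        uint32_t new_bucket = raw_hash & (new_size - 1);   /* 0x2f & 3 = 3 */

        /* Buggy pattern: the hash was already masked for the new table and
         * is then reused to pick the old table's bucket/lock.  Bit 2 is
         * gone, so a different bucket than old_bucket gets selected. */
        uint32_t derived_old_bucket = new_bucket & (old_size - 1);

        printf("correct old bucket: %u, derived from new-masked hash: %u\n",
               old_bucket, derived_old_bucket);
        return 0;
}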
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5919d63f58e4..e96fc00208bc 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -552,8 +552,10 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
                                 struct bucket_table *tbl, u32 hash)
 {
-        struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
-                                                         tbl, hash);
+        struct rhash_head *head;
+
+        hash = rht_bucket_index(tbl, hash);
+        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
         ASSERT_BUCKET_LOCK(ht, tbl, hash);
 
@@ -593,7 +595,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 
         tbl = rht_dereference_rcu(ht->future_tbl, ht);
         old_tbl = rht_dereference_rcu(ht->tbl, ht);
-        hash = head_hashfn(ht, tbl, obj);
+        hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
         lock_buckets(tbl, old_tbl, hash);
         __rhashtable_insert(ht, obj, tbl, hash);
@@ -627,8 +629,8 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
         bool ret = false;
 
         rcu_read_lock();
-        tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
-        new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+        old_tbl = rht_dereference_rcu(ht->tbl, ht);
+        tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
         new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
         lock_buckets(new_tbl, old_tbl, new_hash);
@@ -643,15 +645,19 @@ restart:
 
                 ASSERT_BUCKET_LOCK(ht, tbl, hash);
 
-                if (unlikely(new_tbl != tbl)) {
-                        rht_for_each_continue(he2, he->next, tbl, hash) {
+                if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
+                    !rht_is_a_nulls(obj->next) &&
+                    head_hashfn(ht, tbl, obj->next) != hash) {
+                        rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
+                } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
+                        rht_for_each_continue(he2, obj->next, tbl, hash) {
                                 if (head_hashfn(ht, tbl, he2) == hash) {
                                         rcu_assign_pointer(*pprev, he2);
                                         goto found;
                                 }
                         }
 
-                        INIT_RHT_NULLS_HEAD(*pprev, ht, hash);
+                        rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
                 } else {
                         rcu_assign_pointer(*pprev, obj->next);
                 }
@@ -666,8 +672,8 @@ found:
          * resizing. Thus traversing both is fine and the added cost is
          * very rare.
          */
-        if (tbl != new_tbl) {
-                tbl = new_tbl;
+        if (tbl != old_tbl) {
+                tbl = old_tbl;
                 goto restart;
         }
 
@@ -835,7 +841,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
         rcu_read_lock();
         old_tbl = rht_dereference_rcu(ht->tbl, ht);
         new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-        new_hash = head_hashfn(ht, new_tbl, obj);
+        new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
         lock_buckets(new_tbl, old_tbl, new_hash);
 
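As a rough illustration of the cross reference the rhashtable_remove() hunk now
avoids, here is a hypothetical userspace miniature (made-up hash values, not
the rhashtable API). The table shrinks from 8 to 4 buckets, so old buckets 3
and 7 collapse into new bucket 3 and their nodes sit on one physically linked
chain; naively splicing around a removed node can make the old bucket point
into the other old bucket's chain.

/* Hypothetical sketch of the shrink-time remove hazard; plain C. */
#include <stdio.h>

struct node {
        unsigned int hash;      /* raw hash of the element */
        struct node *next;
};

int main(void)
{
        /* a and b belong to old bucket 3 (hash & 7); c belongs to old
         * bucket 7; all three belong to new bucket 3 (hash & 3). */
        struct node c = { .hash = 0x0f, .next = NULL };
        struct node b = { .hash = 0x0b, .next = &c };
        struct node a = { .hash = 0x03, .next = &b };

        struct node *old_bucket3 = &a;  /* old table sees a -> b       */
        struct node *new_bucket3 = &a;  /* new table sees a -> b -> c  */

        /* Naive removal of b: splice its predecessor to b->next.  But
         * b->next is c, whose old-table bucket is 7, so old bucket 3 now
         * reaches straight into bucket 7's chain -- the stale cross
         * reference the patch avoids by writing a nulls marker instead. */
        a.next = b.next;

        printf("old bucket 3 now reaches a node of old bucket %u\n",
               old_bucket3->next->hash & 7);
        printf("new bucket 3 chain is still intact, tail hash %#x\n",
               new_bucket3->next->hash);
        return 0;
}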