about summary refs log tree commit diff stats
path: root/lib/rhashtable.c
diff options
context:
space:
mode:
author: Thomas Graf <tgraf@suug.ch> 2015-01-21 06:54:01 -0500
committer: David S. Miller <davem@davemloft.net> 2015-01-26 14:56:34 -0500
commit fe6a043c535acfec8f8e554536c87923dcb45097 (patch)
tree 77f06c370e355b2a1ec6c04ea104983625bd8d7b /lib/rhashtable.c
parent 1dc7b90f7cd050ef6d5e511e652347e52874469c (diff)
rhashtable: rhashtable_remove() must unlink in both tbl and future_tbl
As removals can occur during resizes, entries may be referred to from both tbl and future_tbl when the removal is requested. Therefore rhashtable_remove() must unlink the entry in both tables if this is the case. The existing code did search both tables but stopped when it hit the first match. Failing to unlink in both tables resulted in use after free. Fixes: 97defe1ecf86 ("rhashtable: Per bucket locks & deferred expansion/shrinking") Reported-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c | 24
1 file changed, 15 insertions, 9 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 84a78e396a56..bc2d0d80d1f9 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -585,6 +585,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
585 struct rhash_head *he; 585 struct rhash_head *he;
586 spinlock_t *lock; 586 spinlock_t *lock;
587 unsigned int hash; 587 unsigned int hash;
588 bool ret = false;
588 589
589 rcu_read_lock(); 590 rcu_read_lock();
590 tbl = rht_dereference_rcu(ht->tbl, ht); 591 tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -602,17 +603,16 @@ restart:
602 } 603 }
603 604
604 rcu_assign_pointer(*pprev, obj->next); 605 rcu_assign_pointer(*pprev, obj->next);
605 atomic_dec(&ht->nelems);
606
607 spin_unlock_bh(lock);
608
609 rhashtable_wakeup_worker(ht);
610
611 rcu_read_unlock();
612 606
613 return true; 607 ret = true;
608 break;
614 } 609 }
615 610
611 /* The entry may be linked in either 'tbl', 'future_tbl', or both.
612 * 'future_tbl' only exists for a short period of time during
613 * resizing. Thus traversing both is fine and the added cost is
614 * very rare.
615 */
616 if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) { 616 if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
617 spin_unlock_bh(lock); 617 spin_unlock_bh(lock);
618 618
@@ -625,9 +625,15 @@ restart:
625 } 625 }
626 626
627 spin_unlock_bh(lock); 627 spin_unlock_bh(lock);
628
629 if (ret) {
630 atomic_dec(&ht->nelems);
631 rhashtable_wakeup_worker(ht);
632 }
633
628 rcu_read_unlock(); 634 rcu_read_unlock();
629 635
630 return false; 636 return ret;
631} 637}
632EXPORT_SYMBOL_GPL(rhashtable_remove); 638EXPORT_SYMBOL_GPL(rhashtable_remove);
633 639