diff options
author | David S. Miller <davem@davemloft.net> | 2015-03-15 22:22:17 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-03-15 22:22:17 -0400 |
commit | 7993d44ea1f7b17dd17863ab139d2c9df17dfe51 (patch) | |
tree | 6be3f18d0d0311eb84062e18d30b771b14bb9f9e | |
parent | 0034de4193e4aad30bbbef4e74ca5e0631ba08a7 (diff) | |
parent | 565e86404e4c40e03f602ef0d6d490328f28c493 (diff) |
Merge branch 'rhashtable-fixes-next'
Herbert Xu says:
====================
rhashtable: Fix two bugs caused by multiple rehash preparation
While testing some new patches over the weekend I discovered a
couple of bugs in the series that had just been merged. These
two patches fix them:
1) A use-after-free in the walker that can cause crashes when
walking during a rehash.
2) When a second rehash starts during a single rhashtable_remove
call the remove may fail when it shouldn't.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | lib/rhashtable.c | 24 |
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9d53a46dcca9..c523d3a563aa 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -511,28 +511,25 @@ static bool __rhashtable_remove(struct rhashtable *ht,
511 | */ | 511 | */ |
512 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) | 512 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) |
513 | { | 513 | { |
514 | struct bucket_table *tbl, *old_tbl; | 514 | struct bucket_table *tbl; |
515 | bool ret; | 515 | bool ret; |
516 | 516 | ||
517 | rcu_read_lock(); | 517 | rcu_read_lock(); |
518 | 518 | ||
519 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | 519 | tbl = rht_dereference_rcu(ht->tbl, ht); |
520 | ret = __rhashtable_remove(ht, old_tbl, obj); | ||
521 | 520 | ||
522 | /* Because we have already taken (and released) the bucket | 521 | /* Because we have already taken (and released) the bucket |
523 | * lock in old_tbl, if we find that future_tbl is not yet | 522 | * lock in old_tbl, if we find that future_tbl is not yet |
524 | * visible then that guarantees the entry to still be in | 523 | * visible then that guarantees the entry to still be in |
525 | * old_tbl if it exists. | 524 | * the old tbl if it exists. |
526 | */ | 525 | */ |
527 | tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl; | 526 | while (!(ret = __rhashtable_remove(ht, tbl, obj)) && |
528 | if (!ret && old_tbl != tbl) | 527 | (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) |
529 | ret = __rhashtable_remove(ht, tbl, obj); | 528 | ; |
530 | 529 | ||
531 | if (ret) { | 530 | if (ret) { |
532 | bool no_resize_running = tbl == old_tbl; | ||
533 | |||
534 | atomic_dec(&ht->nelems); | 531 | atomic_dec(&ht->nelems); |
535 | if (no_resize_running && rht_shrink_below_30(ht, tbl)) | 532 | if (rht_shrink_below_30(ht, tbl)) |
536 | schedule_work(&ht->run_work); | 533 | schedule_work(&ht->run_work); |
537 | } | 534 | } |
538 | 535 | ||
@@ -854,10 +851,8 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
854 | struct rhashtable *ht; | 851 | struct rhashtable *ht; |
855 | struct bucket_table *tbl = iter->walker->tbl; | 852 | struct bucket_table *tbl = iter->walker->tbl; |
856 | 853 | ||
857 | rcu_read_unlock(); | ||
858 | |||
859 | if (!tbl) | 854 | if (!tbl) |
860 | return; | 855 | goto out; |
861 | 856 | ||
862 | ht = iter->ht; | 857 | ht = iter->ht; |
863 | 858 | ||
@@ -869,6 +864,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
869 | mutex_unlock(&ht->mutex); | 864 | mutex_unlock(&ht->mutex); |
870 | 865 | ||
871 | iter->p = NULL; | 866 | iter->p = NULL; |
867 | |||
868 | out: | ||
869 | rcu_read_unlock(); | ||
872 | } | 870 | } |
873 | EXPORT_SYMBOL_GPL(rhashtable_walk_stop); | 871 | EXPORT_SYMBOL_GPL(rhashtable_walk_stop); |
874 | 872 | ||