aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2015-03-23 18:53:17 -0400
committerDavid S. Miller <davem@davemloft.net>2015-03-23 22:16:07 -0400
commitba7c95ea3870fe7b847466d39a049ab6f156aa2c (patch)
tree8fed9deb6a4a0c1f52634ff0bd07fb6a0edb8dde
parentce046c568cbfb4734583131086f88cfe993c01d0 (diff)
rhashtable: Fix sleeping inside RCU critical section in walk_stop
The commit 963ecbd41a1026d99ec7537c050867428c397b89 ("rhashtable:
Fix use-after-free in rhashtable_walk_stop") fixed a real bug but
created another one because we may end up sleeping inside an RCU
critical section.

This patch fixes it properly by replacing the mutex with a spin
lock that specifically protects the walker lists.

Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/rhashtable.h2
-rw-r--r--lib/rhashtable.c7
2 files changed, 7 insertions, 2 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f9ecf32bce55..d7be9cb0e91f 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -133,6 +133,7 @@ struct rhashtable_params {
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
  * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
@@ -144,6 +145,7 @@ struct rhashtable {
 	struct rhashtable_params	p;
 	struct work_struct		run_work;
 	struct mutex			mutex;
+	spinlock_t			lock;
 };
 
 /**
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7686c1e9934a..e96ad1a52c90 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -256,8 +256,10 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	/* Publish the new table pointer. */
 	rcu_assign_pointer(ht->tbl, new_tbl);
 
+	spin_lock(&ht->lock);
 	list_for_each_entry(walker, &old_tbl->walkers, list)
 		walker->tbl = NULL;
+	spin_unlock(&ht->lock);
 
 	/* Wait for readers. All new readers will see the new
 	 * table, and thus no references to the old table will
@@ -635,12 +637,12 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 
 	ht = iter->ht;
 
-	mutex_lock(&ht->mutex);
+	spin_lock(&ht->lock);
 	if (tbl->rehash < tbl->size)
 		list_add(&iter->walker->list, &tbl->walkers);
 	else
 		iter->walker->tbl = NULL;
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	iter->p = NULL;
 
@@ -723,6 +725,7 @@ int rhashtable_init(struct rhashtable *ht,
 
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
+	spin_lock_init(&ht->lock);
 	memcpy(&ht->p, params, sizeof(*params));
 
 	if (params->min_size)