author	Thomas Graf <tgraf@suug.ch>	2015-03-24 09:18:20 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-24 17:48:40 -0400
commit	6b6f302ceda7a052dab545d6c69abf5f0d4a6cab
tree	8464850fd94dba0dfa34c75c46dfb3fcf4c3c381
parent	b5e2c150ac914f28a28833b57397bec0b0a2bd5f
rhashtable: Add rhashtable_free_and_destroy()
Add a rhashtable_destroy() variant which stops rehashes, iterates over the table and calls a callback to release resources.

This avoids the need for nft_hash to embed rhashtable internals and allows us to get rid of the being_destroyed flag. It also saves a second mutex lock upon destruction.

It also fixes an RCU lockdep splat on nft set destruction caused by calling rht_for_each_entry_safe() without holding the bucket locks. Open-code this loop, as we need to know that no mutations may occur in parallel.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/rhashtable.h	5
-rw-r--r--	lib/rhashtable.c	49
-rw-r--r--	net/netfilter/nft_hash.c	25
3 files changed, 49 insertions(+), 30 deletions(-)
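For context, here is a minimal usage sketch of the new API. This is not part of the patch; the element type, callback, and helper names are hypothetical, and it assumes elements allocated with kmalloc() and no remaining RCU readers at teardown time:

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical element type embedding the required rhash_head. */
struct my_obj {
	u32 key;
	struct rhash_head node;
};

/* Matches the free_fn signature: ptr is the element, arg is opaque. */
static void my_obj_free(void *ptr, void *arg)
{
	kfree(ptr);
}

static void my_table_teardown(struct rhashtable *ht)
{
	/* Stops a pending async resize, invokes my_obj_free() for every
	 * element under ht->mutex, then frees the bucket array. */
	rhashtable_free_and_destroy(ht, my_obj_free, NULL);
}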
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index ae26c494e230..99f2e49a8a07 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -136,12 +136,10 @@ struct rhashtable_params {
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
- * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
 	struct bucket_table __rcu *tbl;
 	atomic_t nelems;
-	bool being_destroyed;
 	unsigned int key_len;
 	unsigned int elasticity;
 	struct rhashtable_params p;
@@ -334,6 +332,9 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
 
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+				 void (*free_fn)(void *ptr, void *arg),
+				 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 50374d181148..4b7b7e672b93 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -359,8 +359,6 @@ static void rht_deferred_worker(struct work_struct *work)
 
 	ht = container_of(work, struct rhashtable, run_work);
 	mutex_lock(&ht->mutex);
-	if (ht->being_destroyed)
-		goto unlock;
 
 	tbl = rht_dereference(ht->tbl, ht);
 	tbl = rhashtable_last_table(ht, tbl);
@@ -372,7 +370,6 @@ static void rht_deferred_worker(struct work_struct *work)
 
 	err = rhashtable_rehash_table(ht);
 
-unlock:
 	mutex_unlock(&ht->mutex);
 
 	if (err)
@@ -783,21 +780,53 @@ int rhashtable_init(struct rhashtable *ht,
 EXPORT_SYMBOL_GPL(rhashtable_init);
 
 /**
- * rhashtable_destroy - destroy hash table
+ * rhashtable_free_and_destroy - free elements and destroy hash table
  * @ht: the hash table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
  *
- * Frees the bucket array. This function is not rcu safe, therefore the caller
- * has to make sure that no resizing may happen by unpublishing the hashtable
- * and waiting for the quiescent cycle before releasing the bucket array.
+ * Stops an eventual async resize. If defined, invokes free_fn for each
+ * element to release resources. Please note that RCU protected
+ * readers may still be accessing the elements. Releasing of resources
+ * must occur in a compatible manner. Then frees the bucket array.
+ *
+ * This function will eventually sleep to wait for an async resize
+ * to complete. The caller is responsible that no further write operations
+ * occur in parallel.
  */
-void rhashtable_destroy(struct rhashtable *ht)
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+				 void (*free_fn)(void *ptr, void *arg),
+				 void *arg)
 {
-	ht->being_destroyed = true;
+	const struct bucket_table *tbl;
+	unsigned int i;
 
 	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
-	bucket_table_free(rht_dereference(ht->tbl, ht));
+	tbl = rht_dereference(ht->tbl, ht);
+	if (free_fn) {
+		for (i = 0; i < tbl->size; i++) {
+			struct rhash_head *pos, *next;
+
+			for (pos = rht_dereference(tbl->buckets[i], ht),
+			     next = !rht_is_a_nulls(pos) ?
+					rht_dereference(pos->next, ht) : NULL;
+			     !rht_is_a_nulls(pos);
+			     pos = next,
+			     next = !rht_is_a_nulls(pos) ?
+					rht_dereference(pos->next, ht) : NULL)
+				free_fn(rht_obj(ht, pos), arg);
+		}
+	}
+
+	bucket_table_free(tbl);
 	mutex_unlock(&ht->mutex);
 }
+EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
+
+void rhashtable_destroy(struct rhashtable *ht)
+{
+	return rhashtable_free_and_destroy(ht, NULL, NULL);
+}
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
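The new kerneldoc comment stresses that RCU readers may still hold references when free_fn runs, so the release must happen in an RCU-compatible manner. A sketch of one compatible approach (the my_entry type is hypothetical; it assumes each element embeds a struct rcu_head):

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	struct rhash_head node;
	struct rcu_head rcu;	/* needed for deferred freeing */
	unsigned long data;
};

static void my_entry_free(void *ptr, void *arg)
{
	struct my_entry *e = ptr;

	/* Defer the kfree() until a grace period has elapsed, so
	 * concurrent RCU readers never see freed memory. */
	kfree_rcu(e, rcu);
}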
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 8577a37af18b..f9ce2195fd63 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -188,26 +188,15 @@ static int nft_hash_init(const struct nft_set *set,
 	return rhashtable_init(priv, &params);
 }
 
-static void nft_hash_destroy(const struct nft_set *set)
+static void nft_free_element(void *ptr, void *arg)
 {
-	struct rhashtable *priv = nft_set_priv(set);
-	const struct bucket_table *tbl;
-	struct nft_hash_elem *he;
-	struct rhash_head *pos, *next;
-	unsigned int i;
-
-	/* Stop an eventual async resizing */
-	priv->being_destroyed = true;
-	mutex_lock(&priv->mutex);
-
-	tbl = rht_dereference(priv->tbl, priv);
-	for (i = 0; i < tbl->size; i++) {
-		rht_for_each_entry_safe(he, pos, next, tbl, i, node)
-			nft_hash_elem_destroy(set, he);
-	}
-	mutex_unlock(&priv->mutex);
+	nft_hash_elem_destroy((const struct nft_set *)arg, ptr);
+}
 
-	rhashtable_destroy(priv);
+static void nft_hash_destroy(const struct nft_set *set)
+{
+	rhashtable_free_and_destroy(nft_set_priv(set), nft_free_element,
+				    (void *)set);
 }
 
 static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
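nft_hash threads the set through the opaque arg pointer, casting away const at the call site and restoring the type inside the callback. The same pattern works for any per-table context; a generic sketch (my_ctx and its kmem_cache are hypothetical, not part of the patch):

struct my_ctx {
	struct kmem_cache *cache;	/* slab the elements came from */
};

static void my_free_element(void *ptr, void *arg)
{
	struct my_ctx *ctx = arg;	/* recover the typed context */

	kmem_cache_free(ctx->cache, ptr);
}

/* Caller: rhashtable_free_and_destroy(ht, my_free_element, &ctx); */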