author     NeilBrown <neilb@suse.com>                 2019-04-11 21:52:08 -0400
committer  David S. Miller <davem@davemloft.net>      2019-04-12 20:34:45 -0400
commit     f4712b46a529ca2da078c82d5d99d367c7ebf82b (patch)
tree       0e77f82fcf92c8483e192ecba4acb6ce8c5a86a8
parent     adc6a3ab192eb40fb9d8b093c87d9aa785af4513 (diff)
rhashtable: replace rht_ptr_locked() with rht_assign_locked()
The only time rht_ptr_locked() is used is to store a new
value in a bucket head, and that is the only use that makes
sense. So replace it with a function that does the whole
task: set the lock bit and assign to a bucket head.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/rhashtable.h | 9 ++++++---
-rw-r--r--  lib/rhashtable.c           | 6 +++---
2 files changed, 9 insertions(+), 6 deletions(-)
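The core of the change is visible in the first hunk below: the patch keeps the bucket's lock bit set while storing a new head, and the comment it extends notes this is safe because rcu_assign_pointer() provides the same release semantics as bit_spin_unlock(). As a rough user-space analogue of that guarantee, here is a minimal sketch under C11 atomics; publish_locked() is a hypothetical name, and it tags bit 0 where the kernel uses BIT(1):

#include <stdatomic.h>
#include <stdint.h>

struct node { struct node *next; int key; };

/* Hypothetical analogue of rht_assign_locked(): publish a new head with
 * a store-release, keeping the lock bit set in the stored value.  The
 * release ordering guarantees that all prior writes initializing *obj
 * are visible to any reader that observes the new head pointer. */
static void publish_locked(_Atomic(uintptr_t) *bkt, struct node *obj)
{
	atomic_store_explicit(bkt, (uintptr_t)obj | 1UL,
			      memory_order_release);
}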
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b54e6436547e..882bc0fcea4b 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -316,6 +316,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
  * provides the same release semantics that bit_spin_unlock() provides,
  * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
@@ -369,10 +370,12 @@ static inline struct rhash_head *rht_ptr_exclusive(
 	return (void *)(((unsigned long)p) & ~BIT(1));
 }
 
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
-							   struct rhash_head *p)
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+				     struct rhash_head *obj)
 {
-	return (void *)(((unsigned long)p) | BIT(1));
+	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(1)));
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 237368ea98c5..ef5378efdef3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 		rcu_assign_pointer(*pprev, next);
 	else
 		/* Need to preserve the bit lock. */
-		rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+		rht_assign_locked(bkt, next);
 
 out:
 	return err;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		rcu_assign_pointer(*pprev, obj);
 	else
 		/* Need to preserve the bit lock */
-		rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+		rht_assign_locked(bkt, obj);
 
 	return NULL;
 }
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	/* bkt is always the head of the list, so it holds
 	 * the lock, which we need to preserve
 	 */
-	rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+	rht_assign_locked(bkt, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
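The pointer-tagging trick underlying both the old rht_ptr_locked() and the new rht_assign_locked() is simply that an aligned pointer's low bits are always zero and can therefore carry flags. Below is a self-contained toy version; the toy_* helpers are invented for illustration and use plain stores and bit 0, unlike the kernel's RCU-aware stores and BIT(1):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct node { struct node *next; int key; };

#define LOCK_BIT 1UL	/* flag carried in bit 0 of the bucket head */

/* Set the lock bit and install a new head in a single store, the job
 * rht_assign_locked() does for a real bucket. */
static void toy_assign_locked(struct node **bkt, struct node *obj)
{
	*bkt = (struct node *)((uintptr_t)obj | LOCK_BIT);
}

/* Strip the tag to recover the real head pointer. */
static struct node *toy_head(struct node **bkt)
{
	return (struct node *)((uintptr_t)*bkt & ~LOCK_BIT);
}

int main(void)
{
	struct node n = { .next = NULL, .key = 42 };
	struct node *bucket = NULL;

	toy_assign_locked(&bucket, &n);
	assert((uintptr_t)bucket & LOCK_BIT);	/* head is "locked" */
	printf("head key = %d\n", toy_head(&bucket)->key);
	return 0;
}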