-rw-r--r--   include/linux/rhashtable.h   9
-rw-r--r--   lib/rhashtable.c             6
2 files changed, 9 insertions, 6 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index b54e6436547e..882bc0fcea4b 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -316,6 +316,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
  * provides the same release semantics that bit_spin_unlock() provides,
  * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
@@ -369,10 +370,12 @@ static inline struct rhash_head *rht_ptr_exclusive(
 	return (void *)(((unsigned long)p) & ~BIT(1));
 }
 
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
-							   struct rhash_head *p)
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+				     struct rhash_head *obj)
 {
-	return (void *)(((unsigned long)p) | BIT(1));
+	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(1)));
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
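
The new rht_assign_locked() publishes a new bucket head while keeping the bucket's bit lock set: the lock lives in BIT(1) of the bucket pointer itself, so the store that installs the head must re-assert that bit. Below is a minimal userspace sketch of the technique, with C11 atomics standing in for the kernel's bit_spin_lock() and rcu_assign_pointer(); every name in it (bucket_t, BUCKET_LOCK_BIT, the helper functions) is hypothetical, not kernel API.

/*
 * Minimal userspace sketch of the "lock bit stored in the bucket
 * pointer" technique, assuming C11 atomics in place of the kernel's
 * bit_spin_lock()/rcu_assign_pointer().  All names here (bucket_t,
 * BUCKET_LOCK_BIT, the helpers) are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_LOCK_BIT (1UL << 1)	/* mirrors BIT(1) in the patch */

typedef _Atomic uintptr_t bucket_t;

struct node { int key; struct node *next; };	/* >= 4-byte aligned, so bit 1 is free */

/* Spin until we own the lock bit; acquire ordering on success. */
static void bucket_lock(bucket_t *bkt)
{
	while (atomic_fetch_or_explicit(bkt, BUCKET_LOCK_BIT,
					memory_order_acquire) & BUCKET_LOCK_BIT)
		;
}

/* Analogue of rht_assign_locked(): install a new head while keeping
 * the lock bit set, with release semantics for the pointed-to data. */
static void bucket_assign_locked(bucket_t *bkt, struct node *obj)
{
	atomic_store_explicit(bkt, (uintptr_t)obj | BUCKET_LOCK_BIT,
			      memory_order_release);
}

/* Analogue of rht_assign_unlock(): install a new head and drop the
 * lock in a single release store. */
static void bucket_assign_unlock(bucket_t *bkt, struct node *obj)
{
	atomic_store_explicit(bkt, (uintptr_t)obj, memory_order_release);
}

/* Readers mask the lock bit off before dereferencing, as
 * rht_ptr_exclusive() does with "& ~BIT(1)" above. */
static struct node *bucket_head(bucket_t *bkt)
{
	uintptr_t v = atomic_load_explicit(bkt, memory_order_acquire);

	return (struct node *)(v & ~BUCKET_LOCK_BIT);
}

int main(void)
{
	bucket_t bkt = 0;
	struct node n = { .key = 42, .next = NULL };

	bucket_lock(&bkt);
	bucket_assign_locked(&bkt, &n);	/* bucket updated, still locked */
	bucket_assign_unlock(&bkt, &n);	/* store and unlock in one step */
	printf("head key = %d\n", bucket_head(&bkt)->key);
	return 0;
}

The detail the patch hinges on is visible in bucket_assign_locked(): the new head is ored together with the lock bit in a single release store, so a concurrent locker spinning on that bit never observes it cleared mid-update.
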
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 237368ea98c5..ef5378efdef3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 		rcu_assign_pointer(*pprev, next);
 	else
 		/* Need to preserved the bit lock. */
-		rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+		rht_assign_locked(bkt, next);
 
 out:
 	return err;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 			rcu_assign_pointer(*pprev, obj);
 		else
 			/* Need to preserve the bit lock */
-			rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+			rht_assign_locked(bkt, obj);
 
 		return NULL;
 	}
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	/* bkt is always the head of the list, so it holds
 	 * the lock, which we need to preserve
 	 */
-	rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+	rht_assign_locked(bkt, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
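
All three lib/rhashtable.c hunks share the same shape: a write through pprev (an interior next pointer) can use rcu_assign_pointer() as before, while a write to the bucket head must go through rht_assign_locked(), because the head pointer doubles as the lock word. A hedged sketch of that branch, extending the hypothetical helpers from the example above:

/*
 * Hypothetical continuation of the sketch above.  pprev points at the
 * previous element's next pointer and is NULL only when the element
 * being replaced is the bucket head itself.
 */
static void relink(bucket_t *bkt, struct node **pprev, struct node *next)
{
	if (pprev)
		/* Interior link: no lock bit lives here.  (The kernel
		 * uses rcu_assign_pointer() for this store.) */
		*pprev = next;
	else
		/* Head link doubles as the lock word: preserve the bit. */
		bucket_assign_locked(bkt, next);
}
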