-rw-r--r--  include/linux/rhashtable-types.h  |   2
-rw-r--r--  include/linux/rhashtable.h        | 261
-rw-r--r--  ipc/util.c                        |   1
-rw-r--r--  lib/rhashtable.c                  | 141
-rw-r--r--  lib/test_rhashtable.c             |   2
-rw-r--r--  net/bridge/br_fdb.c               |   1
-rw-r--r--  net/bridge/br_multicast.c         |   1
-rw-r--r--  net/bridge/br_vlan.c              |   1
-rw-r--r--  net/bridge/br_vlan_tunnel.c       |   1
-rw-r--r--  net/ipv4/ipmr.c                   |   1
-rw-r--r--  net/ipv6/ip6mr.c                  |   1
-rw-r--r--  net/netfilter/nf_tables_api.c     |   1
12 files changed, 236 insertions(+), 178 deletions(-)
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 763d613ce2c2..57467cbf4c5b 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -48,7 +48,6 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
  * @head_offset: Offset of rhash_head in struct to be hashed
  * @max_size: Maximum size while expanding
  * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
  * @automatic_shrinking: Enable automatic shrinking of tables
  * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
  * @obj_hashfn: Function to hash object
@@ -62,7 +61,6 @@ struct rhashtable_params {
 	unsigned int		max_size;
 	u16			min_size;
 	bool			automatic_shrinking;
-	u8			locks_mul;
 	rht_hashfn_t		hashfn;
 	rht_obj_hashfn_t	obj_hashfn;
 	rht_obj_cmpfn_t		obj_cmpfn;
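
With locks_mul gone from struct rhashtable_params there is nothing for callers to replace it with; parameter blocks simply lose one line. Below is a minimal sketch of an initializer after this change (struct my_obj and its fields are hypothetical, not part of this patch):

/* Hypothetical user of rhashtable after this change: note that no
 * .locks_mul field is supplied -- bucket locking is now carried in the
 * bucket pointers themselves.
 */
struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params my_obj_params = {
	.key_offset		= offsetof(struct my_obj, key),
	.key_len		= sizeof(u32),
	.head_offset		= offsetof(struct my_obj, node),
	.automatic_shrinking	= true,
};
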
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 0c9175aeab8a..ccbbafdf5547 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -24,12 +24,27 @@
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/rculist.h>
+#include <linux/bit_spinlock.h>
 
 #include <linux/rhashtable-types.h>
 /*
+ * Objects in an rhashtable have an embedded struct rhash_head
+ * which is linked into a hash chain from the hash table - or one
+ * of two or more hash tables when the rhashtable is being resized.
  * The end of the chain is marked with a special nulls marks which has
- * the least significant bit set.
+ * the least significant bit set but otherwise stores the address of
+ * the hash bucket.  This allows us to be sure we've found the end
+ * of the right list.
+ * The value stored in the hash bucket has BIT(1) used as a lock bit.
+ * This bit must be atomically set before any changes are made to
+ * the chain.  To avoid dereferencing this pointer without clearing
+ * the bit first, we use an opaque 'struct rhash_lock_head *' for the
+ * pointer stored in the bucket.  This struct needs to be defined so
+ * that rcu_dereference() works on it, but it has no content so a
+ * cast is needed for it to be useful.  This ensures it isn't
+ * used by mistake without clearing the lock bit first.
  */
+struct rhash_lock_head {};
 
 /* Maximum chain length before rehash
  *
@@ -52,8 +67,6 @@
  * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
- * @locks_mask: Mask to apply before accessing locks[]
- * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
@@ -64,17 +77,71 @@ struct bucket_table {
 	unsigned int		size;
 	unsigned int		nest;
 	u32			hash_rnd;
-	unsigned int		locks_mask;
-	spinlock_t		*locks;
 	struct list_head	walkers;
 	struct rcu_head		rcu;
 
 	struct bucket_table __rcu *future_tbl;
 
-	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
+ * We lock a bucket by setting BIT(1) in the pointer - this is always
+ * zero in real pointers and in the nulls marker.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer.  In that case
+ * we cannot get a lock.  For remove and replace the bucket cannot be
+ * interesting and doesn't need locking.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there.  In that
+ * case we don't need to unlock, but we do need to reset state such as
+ * local_bh. For that we have rht_assign_unlock().  As rcu_assign_pointer()
+ * provides the same release semantics that bit_spin_unlock() provides,
+ * this is safe.
+ */
+
+static inline void rht_lock(struct rhash_lock_head **bkt)
+{
+	local_bh_disable();
+	bit_spin_lock(1, (unsigned long *)bkt);
+}
+
+static inline void rht_unlock(struct rhash_lock_head **bkt)
+{
+	bit_spin_unlock(1, (unsigned long *)bkt);
+	local_bh_enable();
+}
+
+static inline void rht_assign_unlock(struct rhash_lock_head **bkt,
+				     struct rhash_head *obj)
+{
+	struct rhash_head **p = (struct rhash_head **)bkt;
+
+	rcu_assign_pointer(*p, obj);
+	preempt_enable();
+	__release(bitlock);
+	local_bh_enable();
+}
+
+/*
+ * If 'p' is a bucket head and might be locked:
+ *   rht_ptr() returns the address without the lock bit.
+ *   rht_ptr_locked() returns the address WITH the lock bit.
+ */
+static inline struct rhash_head __rcu *rht_ptr(const struct rhash_lock_head *p)
+{
+	return (void *)(((unsigned long)p) & ~BIT(1));
+}
+
+static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
+						struct rhash_head *p)
+{
+	return (void *)(((unsigned long)p) | BIT(1));
+}
+
+/*
  * NULLS_MARKER() expects a hash value with the low
  * bits mostly likely to be significant, and it discards
  * the msb.
@@ -206,25 +273,6 @@ static inline bool rht_grow_above_max(const struct rhashtable *ht,
 	return atomic_read(&ht->nelems) >= ht->max_elems;
 }
 
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
-					  unsigned int hash)
-{
-	return &tbl->locks[hash & tbl->locks_mask];
-}
-
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -263,13 +311,13 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 			      void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-					    unsigned int hash);
-struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-					      unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-						   struct bucket_table *tbl,
-						   unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+						 unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+						   unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+							struct bucket_table *tbl,
+							unsigned int hash);
 
 #define rht_dereference(p, ht) \
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -286,21 +334,21 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
 	const struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
 	struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
-static inline struct rhash_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
 	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
 	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -326,7 +374,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash:	the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-	rht_for_each_from(pos, *rht_bucket(tbl, hash), tbl, hash)
+	rht_for_each_from(pos, rht_ptr(*rht_bucket(tbl, hash)), tbl, hash)
 
 /**
  * rht_for_each_entry_from - iterate over hash chain from given head
@@ -351,7 +399,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member:	name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
-	rht_for_each_entry_from(tpos, pos, *rht_bucket(tbl, hash),	\
+	rht_for_each_entry_from(tpos, pos, rht_ptr(*rht_bucket(tbl, hash)), \
 				tbl, hash, member)
 
 /**
@@ -367,7 +415,8 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * remove the loop cursor from the list.
  */
 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
-	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+	for (pos = rht_dereference_bucket(rht_ptr(*rht_bucket(tbl, hash)),    \
+					  tbl, hash),			      \
 	     next = !rht_is_a_nulls(pos) ?				      \
 		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
 	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
@@ -402,8 +451,12 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_rcu(pos, tbl, hash)			\
-	rht_for_each_rcu_from(pos, *rht_bucket(tbl, hash), tbl, hash)
+#define rht_for_each_rcu(pos, tbl, hash)				\
+	for (({barrier(); }),						\
+	     pos = rht_ptr(rht_dereference_bucket_rcu(			\
+			   *rht_bucket(tbl, hash), tbl, hash));		\
+	     !rht_is_a_nulls(pos);					\
+	     pos = rcu_dereference_raw(pos->next))
 
 /**
  * rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head
@@ -437,7 +490,8 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
-	rht_for_each_entry_rcu_from(tpos, pos, *rht_bucket(tbl, hash),	   \
+	rht_for_each_entry_rcu_from(tpos, pos,				   \
+				    rht_ptr(*rht_bucket(tbl, hash)),	   \
 				    tbl, hash, member)
 
 /**
@@ -483,7 +537,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_head __rcu * const *head;
+	struct rhash_lock_head __rcu * const *bkt;
 	struct bucket_table *tbl;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -491,9 +545,10 @@ static inline struct rhash_head *__rhashtable_lookup(
 	tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
 	hash = rht_key_hashfn(ht, tbl, key, params);
-	head = rht_bucket(tbl, hash);
+	bkt = rht_bucket(tbl, hash);
 	do {
-		rht_for_each_rcu_from(he, *head, tbl, hash) {
+		he = rht_ptr(rht_dereference_bucket_rcu(*bkt, tbl, hash));
+		rht_for_each_rcu_from(he, he, tbl, hash) {
 			if (params.obj_cmpfn ?
 			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
 			    rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -503,7 +558,7 @@ restart:
 	/* An object might have been moved to a different hash chain,
 	 * while we walk along it - better check and retry.
 	 */
-	} while (he != RHT_NULLS_MARKER(head));
+	} while (he != RHT_NULLS_MARKER(bkt));
 
 	/* Ensure we see any new tables. */
 	smp_rmb();
@@ -599,10 +654,10 @@ static inline void *__rhashtable_insert_fast(
 		.ht = ht,
 		.key = key,
 	};
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct bucket_table *tbl;
 	struct rhash_head *head;
-	spinlock_t *lock;
 	unsigned int hash;
 	int elasticity;
 	void *data;
@@ -611,23 +666,22 @@ static inline void *__rhashtable_insert_fast(
 
 	tbl = rht_dereference_rcu(ht->tbl, ht);
 	hash = rht_head_hashfn(ht, tbl, obj, params);
-	lock = rht_bucket_lock(tbl, hash);
-	spin_lock_bh(lock);
+	elasticity = RHT_ELASTICITY;
+	bkt = rht_bucket_insert(ht, tbl, hash);
+	data = ERR_PTR(-ENOMEM);
+	if (!bkt)
+		goto out;
+	pprev = NULL;
+	rht_lock(bkt);
 
 	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
-		spin_unlock_bh(lock);
+		rht_unlock(bkt);
 		rcu_read_unlock();
 		return rhashtable_insert_slow(ht, key, obj);
 	}
 
-	elasticity = RHT_ELASTICITY;
-	pprev = rht_bucket_insert(ht, tbl, hash);
-	data = ERR_PTR(-ENOMEM);
-	if (!pprev)
-		goto out;
-
-	rht_for_each_from(head, *pprev, tbl, hash) {
+	rht_for_each_from(head, rht_ptr(*bkt), tbl, hash) {
 		struct rhlist_head *plist;
 		struct rhlist_head *list;
 
@@ -643,7 +697,7 @@ slow_path:
 		data = rht_obj(ht, head);
 
 		if (!rhlist)
-			goto out;
+			goto out_unlock;
 
 
 		list = container_of(obj, struct rhlist_head, rhead);
@@ -652,9 +706,13 @@ slow_path:
 		RCU_INIT_POINTER(list->next, plist);
 		head = rht_dereference_bucket(head->next, tbl, hash);
 		RCU_INIT_POINTER(list->rhead.next, head);
-		rcu_assign_pointer(*pprev, obj);
-
-		goto good;
+		if (pprev) {
+			rcu_assign_pointer(*pprev, obj);
+			rht_unlock(bkt);
+		} else
+			rht_assign_unlock(bkt, obj);
+		data = NULL;
+		goto out;
 	}
 
 	if (elasticity <= 0)
@@ -662,12 +720,13 @@ slow_path:
 
 	data = ERR_PTR(-E2BIG);
 	if (unlikely(rht_grow_above_max(ht, tbl)))
-		goto out;
+		goto out_unlock;
 
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		goto slow_path;
 
-	head = rht_dereference_bucket(*pprev, tbl, hash);
+	/* Inserting at head of list makes unlocking free. */
+	head = rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
 
 	RCU_INIT_POINTER(obj->next, head);
 	if (rhlist) {
@@ -677,20 +736,21 @@ slow_path:
 		RCU_INIT_POINTER(list->next, NULL);
 	}
 
-	rcu_assign_pointer(*pprev, obj);
-
 	atomic_inc(&ht->nelems);
+	rht_assign_unlock(bkt, obj);
+
 	if (rht_grow_above_75(ht, tbl))
 		schedule_work(&ht->run_work);
 
-good:
 	data = NULL;
-
 out:
-	spin_unlock_bh(lock);
 	rcu_read_unlock();
 
 	return data;
+
+out_unlock:
+	rht_unlock(bkt);
+	goto out;
 }
 
 /**
@@ -699,9 +759,9 @@ out:
  * @obj:	pointer to hash head inside object
  * @params:	hash table parameters
  *
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
  * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
  *
  * It is safe to call this function from atomic context.
  *
@@ -728,9 +788,9 @@ static inline int rhashtable_insert_fast(
  * @list:	pointer to hash list head inside object
  * @params:	hash table parameters
  *
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
  * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
  *
  * It is safe to call this function from atomic context.
  *
@@ -751,9 +811,9 @@ static inline int rhltable_insert_key(
  * @list:	pointer to hash list head inside object
  * @params:	hash table parameters
  *
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
  * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
  *
  * It is safe to call this function from atomic context.
  *
@@ -880,21 +940,20 @@ static inline int __rhashtable_remove_fast_one(
 	struct rhash_head *obj, const struct rhashtable_params params,
 	bool rhlist)
 {
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
-	spinlock_t * lock;
 	unsigned int hash;
 	int err = -ENOENT;
 
 	hash = rht_head_hashfn(ht, tbl, obj, params);
-	lock = rht_bucket_lock(tbl, hash);
+	bkt = rht_bucket_var(tbl, hash);
+	if (!bkt)
+		return -ENOENT;
+	pprev = NULL;
+	rht_lock(bkt);
 
-	spin_lock_bh(lock);
-
-	pprev = rht_bucket_var(tbl, hash);
-	if (!pprev)
-		goto out;
-	rht_for_each_from(he, *pprev, tbl, hash) {
+	rht_for_each_from(he, rht_ptr(*bkt), tbl, hash) {
 		struct rhlist_head *list;
 
 		list = container_of(he, struct rhlist_head, rhead);
@@ -934,13 +993,17 @@ static inline int __rhashtable_remove_fast_one(
 		}
 	}
 
-		rcu_assign_pointer(*pprev, obj);
-		break;
+		if (pprev) {
+			rcu_assign_pointer(*pprev, obj);
+			rht_unlock(bkt);
+		} else {
+			rht_assign_unlock(bkt, obj);
+		}
+		goto unlocked;
 	}
 
-out:
-	spin_unlock_bh(lock);
-
+	rht_unlock(bkt);
+unlocked:
 	if (err > 0) {
 		atomic_dec(&ht->nelems);
 		if (unlikely(ht->p.automatic_shrinking &&
@@ -1029,9 +1092,9 @@ static inline int __rhashtable_replace_fast(
 	struct rhash_head *obj_old, struct rhash_head *obj_new,
 	const struct rhashtable_params params)
 {
+	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
-	spinlock_t *lock;
 	unsigned int hash;
 	int err = -ENOENT;
 
@@ -1042,27 +1105,33 @@ static inline int __rhashtable_replace_fast(
 	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
 		return -EINVAL;
 
-	lock = rht_bucket_lock(tbl, hash);
+	bkt = rht_bucket_var(tbl, hash);
+	if (!bkt)
+		return -ENOENT;
 
-	spin_lock_bh(lock);
+	pprev = NULL;
+	rht_lock(bkt);
 
-	pprev = rht_bucket_var(tbl, hash);
-	if (!pprev)
-		goto out;
-	rht_for_each_from(he, *pprev, tbl, hash) {
+	rht_for_each_from(he, rht_ptr(*bkt), tbl, hash) {
 		if (he != obj_old) {
 			pprev = &he->next;
 			continue;
 		}
 
 		rcu_assign_pointer(obj_new->next, obj_old->next);
-		rcu_assign_pointer(*pprev, obj_new);
+		if (pprev) {
+			rcu_assign_pointer(*pprev, obj_new);
+			rht_unlock(bkt);
+		} else {
+			rht_assign_unlock(bkt, obj_new);
+		}
 		err = 0;
-		break;
+		goto unlocked;
 	}
-out:
-	spin_unlock_bh(lock);
 
+	rht_unlock(bkt);
+
+unlocked:
 	return err;
 }
 
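
Taken together, the helpers added above replace the old spin_lock_bh()/spin_unlock_bh() pairs with a per-bucket bit lock: rht_unlock() when only interior pointers changed, rht_assign_unlock() when the bucket head itself is rewritten (the store doubles as the release). The following is an illustrative sketch of that discipline, mirroring the tail of __rhashtable_insert_fast() above but omitting elasticity, duplicate handling and error reporting; example_insert_head() is not part of the patch:

/* Sketch of the bucket-locking pattern introduced above (assumes a
 * valid ht, tbl and hash, and an object with an embedded rhash_head).
 * bkt points at the bucket slot; BIT(1) of *bkt is the lock.
 */
static void example_insert_head(struct rhashtable *ht,
				struct bucket_table *tbl,
				unsigned int hash,
				struct rhash_head *obj)
{
	struct rhash_lock_head __rcu **bkt = rht_bucket_insert(ht, tbl, hash);

	if (!bkt)
		return;		/* nested-table bucket could not be allocated */

	rht_lock(bkt);		/* local_bh_disable() + bit_spin_lock(1) */

	/* Link the new object in front of the current (unlocked) head. */
	RCU_INIT_POINTER(obj->next,
			 rht_ptr(rht_dereference_bucket(*bkt, tbl, hash)));

	/* Publishing the new head releases the lock: the stored pointer has
	 * BIT(1) clear and rcu_assign_pointer() provides release semantics.
	 */
	rht_assign_unlock(bkt, obj);
}
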
diff --git a/ipc/util.c b/ipc/util.c
index 0af05752969f..095274a871f8 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -101,7 +101,6 @@ static const struct rhashtable_params ipc_kht_params = {
 	.head_offset		= offsetof(struct kern_ipc_perm, khtnode),
 	.key_offset		= offsetof(struct kern_ipc_perm, key),
 	.key_len		= FIELD_SIZEOF(struct kern_ipc_perm, key),
-	.locks_mul		= 1,
 	.automatic_shrinking	= true,
 };
 
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b28fdd560ea9..c5d0974467ee 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,11 +31,10 @@
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
-#define BUCKET_LOCKS_PER_CPU	32UL
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_head __rcu *bucket;
+	struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -56,9 +55,11 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 {
-	spinlock_t *lock = rht_bucket_lock(tbl, hash);
-
-	return (debug_locks) ? lockdep_is_held(lock) : 1;
+	if (!debug_locks)
+		return 1;
+	if (unlikely(tbl->nest))
+		return 1;
+	return bit_spin_is_locked(1, (unsigned long *)&tbl->buckets[hash]);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #else
@@ -104,7 +105,6 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -171,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size, max_locks;
+	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -189,16 +189,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	max_locks = size >> 1;
-	if (tbl->nest)
-		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
-
-	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
-				   ht->p.locks_mul, gfp) < 0) {
-		bucket_table_free(tbl);
-		return NULL;
-	}
-
 	rcu_head_init(&tbl->rcu);
 	INIT_LIST_HEAD(&tbl->walkers);
 
@@ -223,24 +213,23 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 	return new_tbl;
 }
 
-static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
+static int rhashtable_rehash_one(struct rhashtable *ht,
+				 struct rhash_lock_head __rcu **bkt,
+				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
-	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
 	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
-	spinlock_t *new_bucket_lock;
+	struct rhash_head **pprev = NULL;
 	unsigned int new_hash;
 
 	if (new_tbl->nest)
 		goto out;
 
 	err = -ENOENT;
-	if (!pprev)
-		goto out;
 
-	rht_for_each_from(entry, *pprev, old_tbl, old_hash) {
+	rht_for_each_from(entry, rht_ptr(*bkt), old_tbl, old_hash) {
 		err = 0;
 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
 
@@ -255,18 +244,20 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 
 		new_hash = head_hashfn(ht, new_tbl, entry);
 
-		new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
+		rht_lock(&new_tbl->buckets[new_hash]);
 
-		spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
-		head = rht_dereference_bucket(new_tbl->buckets[new_hash],
-					      new_tbl, new_hash);
+		head = rht_ptr(rht_dereference_bucket(new_tbl->buckets[new_hash],
+						      new_tbl, new_hash));
 
 		RCU_INIT_POINTER(entry->next, head);
 
-		rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
-		spin_unlock(new_bucket_lock);
+		rht_assign_unlock(&new_tbl->buckets[new_hash], entry);
 
-		rcu_assign_pointer(*pprev, next);
+		if (pprev)
+			rcu_assign_pointer(*pprev, next);
+		else
+			/* Need to preserve the bit lock. */
+			rcu_assign_pointer(*bkt, rht_ptr_locked(next));
 
 out:
 	return err;
@@ -276,19 +267,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	spinlock_t *old_bucket_lock;
+	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
-	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
+	if (!bkt)
+		return 0;
+	rht_lock(bkt);
 
-	spin_lock_bh(old_bucket_lock);
-	while (!(err = rhashtable_rehash_one(ht, old_hash)))
+	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
 		;
 
 	if (err == -ENOENT)
 		err = 0;
-
-	spin_unlock_bh(old_bucket_lock);
+	rht_unlock(bkt);
 
 	return err;
 }
@@ -485,6 +476,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
+				   struct rhash_lock_head __rcu **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -492,15 +484,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_head __rcu **pprev;
+	struct rhash_head **pprev = NULL;
 	struct rhash_head *head;
 	int elasticity;
 
 	elasticity = RHT_ELASTICITY;
-	pprev = rht_bucket_var(tbl, hash);
-	if (!pprev)
-		return ERR_PTR(-ENOENT);
-	rht_for_each_from(head, *pprev, tbl, hash) {
+	rht_for_each_from(head, rht_ptr(*bkt), tbl, hash) {
 		struct rhlist_head *list;
 		struct rhlist_head *plist;
 
@@ -522,7 +511,11 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		RCU_INIT_POINTER(list->next, plist);
 		head = rht_dereference_bucket(head->next, tbl, hash);
 		RCU_INIT_POINTER(list->rhead.next, head);
-		rcu_assign_pointer(*pprev, obj);
+		if (pprev)
+			rcu_assign_pointer(*pprev, obj);
+		else
+			/* Need to preserve the bit lock */
+			rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
 
 		return NULL;
 	}
@@ -534,12 +527,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
+						  struct rhash_lock_head __rcu **bkt,
 						  struct bucket_table *tbl,
 						  unsigned int hash,
 						  struct rhash_head *obj,
 						  void *data)
 {
-	struct rhash_head __rcu **pprev;
 	struct bucket_table *new_tbl;
 	struct rhash_head *head;
 
@@ -562,11 +555,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		return ERR_PTR(-EAGAIN);
 
-	pprev = rht_bucket_insert(ht, tbl, hash);
-	if (!pprev)
-		return ERR_PTR(-ENOMEM);
-
-	head = rht_dereference_bucket(*pprev, tbl, hash);
+	head = rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
 
 	RCU_INIT_POINTER(obj->next, head);
 	if (ht->rhlist) {
@@ -576,7 +565,10 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 		RCU_INIT_POINTER(list->next, NULL);
 	}
 
-	rcu_assign_pointer(*pprev, obj);
+	/* bkt is always the head of the list, so it holds
+	 * the lock, which we need to preserve
+	 */
+	rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
@@ -590,6 +582,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
+	struct rhash_lock_head __rcu **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -598,14 +591,25 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 	do {
 		tbl = new_tbl;
 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-		spin_lock_bh(rht_bucket_lock(tbl, hash));
-
-		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
-		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
-		if (PTR_ERR(new_tbl) != -EEXIST)
-			data = ERR_CAST(new_tbl);
-
-		spin_unlock_bh(rht_bucket_lock(tbl, hash));
+		if (rcu_access_pointer(tbl->future_tbl))
+			/* Failure is OK */
+			bkt = rht_bucket_var(tbl, hash);
+		else
+			bkt = rht_bucket_insert(ht, tbl, hash);
+		if (bkt == NULL) {
+			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+			data = ERR_PTR(-EAGAIN);
+		} else {
+			rht_lock(bkt);
+			data = rhashtable_lookup_one(ht, bkt, tbl,
+						     hash, key, obj);
+			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
+							hash, obj, data);
+			if (PTR_ERR(new_tbl) != -EEXIST)
+				data = ERR_CAST(new_tbl);
+
+			rht_unlock(bkt);
+		}
 	} while (!IS_ERR_OR_NULL(new_tbl));
 
 	if (PTR_ERR(data) == -EAGAIN)
@@ -1032,11 +1036,6 @@ int rhashtable_init(struct rhashtable *ht,
 
 	size = rounded_hashtable_size(&ht->p);
 
-	if (params->locks_mul)
-		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
-	else
-		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
-
 	ht->key_len = ht->p.key_len;
 	if (!params->hashfn) {
 		ht->p.hashfn = jhash;
@@ -1138,7 +1137,7 @@ restart:
 		struct rhash_head *pos, *next;
 
 		cond_resched();
-		for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
+		for (pos = rht_ptr(rht_dereference(*rht_bucket(tbl, i), ht)),
 		     next = !rht_is_a_nulls(pos) ?
 			       rht_dereference(pos->next, ht) : NULL;
 		     !rht_is_a_nulls(pos);
@@ -1165,8 +1164,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-					      unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+						   unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1194,10 +1193,10 @@ struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-					    unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+						 unsigned int hash)
 {
-	static struct rhash_head __rcu *rhnull;
+	static struct rhash_lock_head __rcu *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1205,9 +1204,9 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-						   struct bucket_table *tbl,
-						   unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+							struct bucket_table *tbl,
+							unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
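
Everything above relies on one invariant: rhash_head addresses are at least four-byte aligned, so BIT(1) of a bucket word is free to act as the lock while the least significant bit keeps marking the nulls sentinel. Below is a standalone, userspace-only sketch of that encoding; the names echo rht_ptr()/rht_ptr_locked() but this is not kernel code:

/* Userspace illustration of the BIT(1) lock-bit encoding used by
 * rht_ptr()/rht_ptr_locked() in this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT	(1UL << 1)

static uintptr_t ptr_locked(uintptr_t p)    { return p | LOCK_BIT;  }
static uintptr_t ptr_plain(uintptr_t p)     { return p & ~LOCK_BIT; }
static int       ptr_is_locked(uintptr_t p) { return (p & LOCK_BIT) != 0; }

int main(void)
{
	/* rhash_head objects are word aligned, so bit 1 is always free. */
	uintptr_t head = 0x1000;
	uintptr_t bucket = ptr_locked(head);	/* what a locked bucket holds */

	printf("locked=%d plain=%#lx\n",
	       ptr_is_locked(bucket), (unsigned long)ptr_plain(bucket));
	return 0;
}
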
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 3bd2e91bfc29..02592c2a249c 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
 		struct rhash_head *pos, *next;
 		struct test_obj_rhl *p;
 
-		pos = rht_dereference(tbl->buckets[i], ht);
+		pos = rht_ptr(rht_dereference(tbl->buckets[i], ht));
 		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
 
 		if (!rht_is_a_nulls(pos)) {
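
As the print_ht() hunk shows, every direct read of a bucket head now goes through rht_ptr() before the value is treated as a chain pointer. A hedged sketch of that read-side pattern, assuming ht->mutex is held as rht_dereference() requires (example_walk_bucket() is illustrative, not part of the patch):

/* Sketch of walking one bucket under ht->mutex after this patch: the
 * bucket value must pass through rht_ptr() before it is used as a
 * struct rhash_head pointer, because BIT(1) may be a lock bit.
 */
static void example_walk_bucket(struct rhashtable *ht,
				struct bucket_table *tbl, unsigned int i)
{
	struct rhash_head *pos;

	for (pos = rht_ptr(rht_dereference(tbl->buckets[i], ht));
	     !rht_is_a_nulls(pos);
	     pos = rht_dereference(pos->next, ht)) {
		/* pos points at an embedded rhash_head; use rht_obj() or
		 * container_of() to reach the enclosing object.
		 */
	}
}
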
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 00573cc46c98..b1c91f66d79c 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -33,7 +33,6 @@ static const struct rhashtable_params br_fdb_rht_params = {
 	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
 	.key_len = sizeof(struct net_bridge_fdb_key),
 	.automatic_shrinking = true,
-	.locks_mul = 1,
 };
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 8d82107c6419..812560d7f7a2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -44,7 +44,6 @@ static const struct rhashtable_params br_mdb_rht_params = {
 	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
 	.key_len = sizeof(struct br_ip),
 	.automatic_shrinking = true,
-	.locks_mul = 1,
 };
 
 static void br_multicast_start_querier(struct net_bridge *br,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 96abf8feb9dc..0a02822b5667 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -21,7 +21,6 @@ static const struct rhashtable_params br_vlan_rht_params = {
 	.key_offset = offsetof(struct net_bridge_vlan, vid),
 	.key_len = sizeof(u16),
 	.nelem_hint = 3,
-	.locks_mul = 1,
 	.max_size = VLAN_N_VID,
 	.obj_cmpfn = br_vlan_cmp,
 	.automatic_shrinking = true,
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index 6d2c4eed2dc8..758151863669 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -34,7 +34,6 @@ static const struct rhashtable_params br_vlan_tunnel_rht_params = {
 	.key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id),
 	.key_len = sizeof(__be64),
 	.nelem_hint = 3,
-	.locks_mul = 1,
 	.obj_cmpfn = br_vlan_tunid_cmp,
 	.automatic_shrinking = true,
 };
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 2c931120c494..9a3f13edc98e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -373,7 +373,6 @@ static const struct rhashtable_params ipmr_rht_params = {
 	.key_offset = offsetof(struct mfc_cache, cmparg),
 	.key_len = sizeof(struct mfc_cache_cmp_arg),
 	.nelem_hint = 3,
-	.locks_mul = 1,
 	.obj_cmpfn = ipmr_hash_cmp,
 	.automatic_shrinking = true,
 };
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e4dd57976737..4e69847ed5be 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -355,7 +355,6 @@ static const struct rhashtable_params ip6mr_rht_params = {
 	.key_offset = offsetof(struct mfc6_cache, cmparg),
 	.key_len = sizeof(struct mfc6_cache_cmp_arg),
 	.nelem_hint = 3,
-	.locks_mul = 1,
 	.obj_cmpfn = ip6mr_hash_cmp,
 	.automatic_shrinking = true,
 };
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ef7772e976cc..90e6b09ef2af 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -53,7 +53,6 @@ static const struct rhashtable_params nft_chain_ht_params = {
 	.hashfn			= nft_chain_hash,
 	.obj_hashfn		= nft_chain_hash_obj,
 	.obj_cmpfn		= nft_chain_hash_cmp,
-	.locks_mul		= 1,
 	.automatic_shrinking	= true,
 };
 
59 58