aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/rhashtable.h34
-rw-r--r--lib/rhashtable.c8
2 files changed, 31 insertions, 11 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index eb7111039247..20f9c6af7473 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -75,8 +75,19 @@ struct bucket_table {
75 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; 75 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
76}; 76};
77 77
78/*
79 * NULLS_MARKER() expects a hash value with the low
80 * bits most likely to be significant, and it discards
81 * the msb.
82 * We give it an address, in which the bottom 2 bits are
83 * always 0, and the msb might be significant.
84 * So we shift the address down one bit to align with
85 * expectations and avoid losing a significant bit.
86 */
87#define RHT_NULLS_MARKER(ptr) \
88 ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
78#define INIT_RHT_NULLS_HEAD(ptr) \ 89#define INIT_RHT_NULLS_HEAD(ptr) \
79 ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) 90 ((ptr) = RHT_NULLS_MARKER(&(ptr)))
80 91
81static inline bool rht_is_a_nulls(const struct rhash_head *ptr) 92static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
82{ 93{
@@ -471,6 +482,7 @@ static inline struct rhash_head *__rhashtable_lookup(
471 .ht = ht, 482 .ht = ht,
472 .key = key, 483 .key = key,
473 }; 484 };
485 struct rhash_head __rcu * const *head;
474 struct bucket_table *tbl; 486 struct bucket_table *tbl;
475 struct rhash_head *he; 487 struct rhash_head *he;
476 unsigned int hash; 488 unsigned int hash;
@@ -478,13 +490,19 @@ static inline struct rhash_head *__rhashtable_lookup(
478 tbl = rht_dereference_rcu(ht->tbl, ht); 490 tbl = rht_dereference_rcu(ht->tbl, ht);
479restart: 491restart:
480 hash = rht_key_hashfn(ht, tbl, key, params); 492 hash = rht_key_hashfn(ht, tbl, key, params);
481 rht_for_each_rcu(he, tbl, hash) { 493 head = rht_bucket(tbl, hash);
482 if (params.obj_cmpfn ? 494 do {
483 params.obj_cmpfn(&arg, rht_obj(ht, he)) : 495 rht_for_each_rcu_continue(he, *head, tbl, hash) {
484 rhashtable_compare(&arg, rht_obj(ht, he))) 496 if (params.obj_cmpfn ?
485 continue; 497 params.obj_cmpfn(&arg, rht_obj(ht, he)) :
486 return he; 498 rhashtable_compare(&arg, rht_obj(ht, he)))
487 } 499 continue;
500 return he;
501 }
502 /* An object might have been moved to a different hash chain,
503 * while we walk along it - better check and retry.
504 */
505 } while (he != RHT_NULLS_MARKER(head));
488 506
489 /* Ensure we see any new tables. */ 507 /* Ensure we see any new tables. */
490 smp_rmb(); 508 smp_rmb();
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 30526afa8343..852ffa5160f1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1179 unsigned int hash) 1179 unsigned int hash)
1180{ 1180{
1181 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); 1181 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1182 static struct rhash_head __rcu *rhnull = 1182 static struct rhash_head __rcu *rhnull;
1183 (struct rhash_head __rcu *)NULLS_MARKER(0);
1184 unsigned int index = hash & ((1 << tbl->nest) - 1); 1183 unsigned int index = hash & ((1 << tbl->nest) - 1);
1185 unsigned int size = tbl->size >> tbl->nest; 1184 unsigned int size = tbl->size >> tbl->nest;
1186 unsigned int subhash = hash; 1185 unsigned int subhash = hash;
@@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1198 subhash >>= shift; 1197 subhash >>= shift;
1199 } 1198 }
1200 1199
1201 if (!ntbl) 1200 if (!ntbl) {
1201 if (!rhnull)
1202 INIT_RHT_NULLS_HEAD(rhnull);
1202 return &rhnull; 1203 return &rhnull;
1204 }
1203 1205
1204 return &ntbl[subhash].bucket; 1206 return &ntbl[subhash].bucket;
1205 1207