author		Thomas Graf <tgraf@suug.ch>	2015-01-02 17:00:16 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-03 14:32:56 -0500
commit		88d6ed15acff1cb44b1d1f3c0a393b7f7744957a (patch)
tree		ee25a48f8ab11d06c062480c89ba1e96c8113e57 /lib
parent		a4b18cda4c2676a4b4b59622b2e0394dc153e00b (diff)
rhashtable: Convert bucket iterators to take table and index
This patch is in preparation for introducing per-bucket spinlocks. It
extends all iterator macros to take the bucket table and bucket index,
and introduces a new rht_dereference_bucket() helper to handle
protected accesses to buckets.
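
The macro definitions themselves live in include/linux/rhashtable.h and
are therefore outside this lib-only diffstat. As a rough sketch of the
intended shape (assumed here, not shown in the diff below), passing the
bucket table and hash lets the dereference helper name the exact bucket
being accessed:

    /* Sketch, assuming the header-side definitions look roughly like
     * this: rht_dereference_bucket() ties a protected dereference to
     * the (tbl, hash) pair so lockdep can later verify the bucket lock.
     */
    #define rht_dereference_bucket(p, tbl, hash) \
            rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

    /* The _continue variant starts from an arbitrary head, which is
     * why hashtable_chain_unzip() below can resume from p->next.
     */
    #define rht_for_each_continue(pos, head, tbl, hash) \
            for (pos = rht_dereference_bucket(head, tbl, hash); \
                 pos; \
                 pos = rht_dereference_bucket((pos)->next, tbl, hash))

    #define rht_for_each(pos, tbl, hash) \
            rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)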
It introduces a barrier() in the RCU iterators to prevent the compiler
from caching the first element.
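
One plausible placement for that barrier(), sketched here because the
header is not part of this diff: it sits in the for-loop initializer of
the RCU variants, so the compiler must re-read the bucket head rather
than reuse a value cached from an earlier load:

    /* Sketch, assuming barrier() leads the loop initializer; it forces
     * a fresh load of tbl->buckets[hash] before the RCU dereference.
     */
    #define rht_for_each_rcu(pos, tbl, hash) \
            for (({barrier(); }), \
                 pos = rcu_dereference((tbl)->buckets[hash]); \
                 pos; \
                 pos = rcu_dereference((pos)->next))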
The lockdep verifier is introduced as a stub which always succeeds; it
is properly implemented in the next patch, when the locks are
introduced.
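
For context, the follow-up patch would be expected to replace the stub
with a real check along these lines (a sketch under the assumption that
the per-bucket spinlocks end up in an array resolved per bucket; the
bucket_lock() helper is illustrative, not taken from this diff):

    /* Sketch: resolve the spinlock guarding this bucket and ask
     * lockdep whether the current context holds it.
     */
    int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
    {
            spinlock_t *lock = bucket_lock(tbl, hash);

            return (debug_locks) ? lockdep_is_held(lock) : 1;
    }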
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r--	lib/rhashtable.c	30 +++++++++++++++++++-----------
1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b658245826a1..ce450d095fdf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -35,6 +35,12 @@ int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
 	return ht->p.mutex_is_held(ht->p.parent);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+	return 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #endif
 
 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
@@ -141,7 +147,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 	 * previous node p. Call the previous node p;
 	 */
 	h = head_hashfn(ht, new_tbl, p);
-	rht_for_each(he, p->next, ht) {
+	rht_for_each_continue(he, p->next, old_tbl, n) {
 		if (head_hashfn(ht, new_tbl, he) != h)
 			break;
 		p = he;
@@ -153,7 +159,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 	 */
 	next = NULL;
 	if (he) {
-		rht_for_each(he, he->next, ht) {
+		rht_for_each_continue(he, he->next, old_tbl, n) {
 			if (head_hashfn(ht, new_tbl, he) == h) {
 				next = he;
 				break;
@@ -208,7 +214,7 @@ int rhashtable_expand(struct rhashtable *ht)
 	 */
 	for (i = 0; i < new_tbl->size; i++) {
 		h = rht_bucket_index(old_tbl, i);
-		rht_for_each(he, old_tbl->buckets[h], ht) {
+		rht_for_each(he, old_tbl, h) {
 			if (head_hashfn(ht, new_tbl, he) == i) {
 				RCU_INIT_POINTER(new_tbl->buckets[i], he);
 				break;
@@ -286,7 +292,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 		 * to the new bucket.
 		 */
 		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
-		     pprev = &rht_dereference(*pprev, ht)->next)
+		     pprev = &rht_dereference_bucket(*pprev, ntbl, i)->next)
 			;
 		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
 	}
@@ -386,7 +392,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 	h = head_hashfn(ht, tbl, obj);
 
 	pprev = &tbl->buckets[h];
-	rht_for_each(he, tbl->buckets[h], ht) {
+	rht_for_each(he, tbl, h) {
 		if (he != obj) {
 			pprev = &he->next;
 			continue;
@@ -423,7 +429,7 @@ void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
 	BUG_ON(!ht->p.key_len);
 
 	h = key_hashfn(ht, key, ht->p.key_len);
-	rht_for_each_rcu(he, tbl->buckets[h], ht) {
+	rht_for_each_rcu(he, tbl, h) {
 		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
 			   ht->p.key_len))
 			continue;
@@ -457,7 +463,7 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key,
 	u32 hash;
 
 	hash = key_hashfn(ht, key, ht->p.key_len);
-	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+	rht_for_each_rcu(he, tbl, hash) {
 		if (!compare(rht_obj(ht, he), arg))
 			continue;
 		return rht_obj(ht, he);
@@ -625,6 +631,7 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 {
 	unsigned int cnt, rcu_cnt, i, total = 0;
+	struct rhash_head *pos;
 	struct test_obj *obj;
 	struct bucket_table *tbl;
 
@@ -635,14 +642,14 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 		if (!quiet)
 			pr_info(" [%#4x/%zu]", i, tbl->size);
 
-		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
+		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
 			cnt++;
 			total++;
 			if (!quiet)
 				pr_cont(" [%p],", obj);
 		}
 
-		rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
+		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
 			rcu_cnt++;
 
 		if (rcu_cnt != cnt)
@@ -664,7 +671,8 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 static int __init test_rhashtable(struct rhashtable *ht)
 {
 	struct bucket_table *tbl;
-	struct test_obj *obj, *next;
+	struct test_obj *obj;
+	struct rhash_head *pos, *next;
 	int err;
 	unsigned int i;
 
@@ -733,7 +741,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 error:
 	tbl = rht_dereference_rcu(ht->tbl, ht);
 	for (i = 0; i < tbl->size; i++)
-		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
+		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
 			kfree(obj);
 
 	return err;