author	David S. Miller <davem@davemloft.net>	2015-03-12 14:35:35 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-12 14:35:35 -0400
commit	6c7005f6cb1bf63bb1f505ef69364fb2cc00628e (patch)
tree	4798fe620f14eaab4403f0715aab12dbfaeed00f
parent	6b9f53bc102d4e61b73c13f661de4a1c358768c1 (diff)
parent	ec9f71c59e00388efc1337307511b59cc4c48394 (diff)
Merge branch 'rhashtable-cleanups'
Herbert Xu says:

====================
rhashtable hash cleanups

This is a rebase on top of the nested lock annotation fix. Nothing to
see here, just a bunch of simple clean-ups before I move on to
something more substantial (hopefully).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
 lib/rhashtable.c | 43 ++++++++++++++++---------------------------
 1 file changed, 16 insertions(+), 27 deletions(-)
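The substance of the series is visible in the first hunk below: the HASH_RESERVED_SPACE shift that obj_raw_hashfn() and key_hashfn() each applied moves into rht_bucket_index(), obj_raw_hashfn() is removed, and head_hashfn()/key_hashfn() now hand callers a ready-to-use bucket index rather than a raw hash. The following is a minimal userspace sketch of that shape, not the kernel source; the shift width, table struct, and toy input are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

#define HASH_RESERVED_SPACE 8	/* illustrative width, not the kernel value */

struct bucket_table {
	uint32_t size;		/* number of buckets, a power of two */
};

/*
 * Post-cleanup shape: a single helper discards the reserved low bits
 * of the raw hash and masks the rest down to a bucket index, so no
 * caller ever handles a raw hash value again.
 */
static uint32_t rht_bucket_index(const struct bucket_table *tbl, uint32_t hash)
{
	return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1);
}

int main(void)
{
	struct bucket_table tbl = { .size = 64 };
	uint32_t raw = 0xdeadbeef;	/* pretend output of ht->p.hashfn */

	printf("bucket = %u\n", rht_bucket_index(&tbl, raw));
	return 0;
}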
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index d7f3db57b5d0..6ffc793145f3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -63,34 +63,25 @@ static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
 
 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
 {
-	return hash & (tbl->size - 1);
-}
-
-static u32 obj_raw_hashfn(struct rhashtable *ht,
-			  const struct bucket_table *tbl, const void *ptr)
-{
-	u32 hash;
-
-	if (unlikely(!ht->p.key_len))
-		hash = ht->p.obj_hashfn(ptr, tbl->hash_rnd);
-	else
-		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
-				    tbl->hash_rnd);
-
-	return hash >> HASH_RESERVED_SPACE;
+	return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1);
 }
 
 static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl,
-		      const void *key, u32 len)
+		      const void *key)
 {
-	return ht->p.hashfn(key, len, tbl->hash_rnd) >> HASH_RESERVED_SPACE;
+	return rht_bucket_index(tbl, ht->p.hashfn(key, ht->p.key_len,
+						  tbl->hash_rnd));
 }
 
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
 		       const struct rhash_head *he)
 {
-	return rht_bucket_index(tbl, obj_raw_hashfn(ht, tbl, rht_obj(ht, he)));
+	const char *ptr = rht_obj(ht, he);
+
+	return likely(ht->p.key_len) ?
+	       key_hashfn(ht, tbl, ptr + ht->p.key_offset) :
+	       rht_bucket_index(tbl, ht->p.obj_hashfn(ptr, tbl->hash_rnd));
 }
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -402,7 +393,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_read_lock();
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = obj_raw_hashfn(ht, old_tbl, rht_obj(ht, obj));
+	hash = head_hashfn(ht, old_tbl, obj);
 
 	spin_lock_bh(bucket_lock(old_tbl, hash));
 
@@ -414,7 +405,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	 */
 	tbl = rht_dereference_rcu(ht->future_tbl, ht);
 	if (tbl != old_tbl) {
-		hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj));
+		hash = head_hashfn(ht, tbl, obj);
 		spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED);
 	}
 
@@ -427,7 +418,6 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 	no_resize_running = tbl == old_tbl;
 
-	hash = rht_bucket_index(tbl, hash);
 	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
 	if (rht_is_a_nulls(head))
@@ -443,11 +433,11 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 exit:
 	if (tbl != old_tbl) {
-		hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj));
+		hash = head_hashfn(ht, tbl, obj);
 		spin_unlock(bucket_lock(tbl, hash));
 	}
 
-	hash = obj_raw_hashfn(ht, old_tbl, rht_obj(ht, obj));
+	hash = head_hashfn(ht, old_tbl, obj);
 	spin_unlock_bh(bucket_lock(old_tbl, hash));
 
 	rcu_read_unlock();
@@ -486,9 +476,8 @@ static bool __rhashtable_remove(struct rhashtable *ht,
 	unsigned hash;
 	bool ret = false;
 
-	hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj));
+	hash = head_hashfn(ht, tbl, obj);
 	lock = bucket_lock(tbl, hash);
-	hash = rht_bucket_index(tbl, hash);
 
 	spin_lock_bh(lock);
 
@@ -620,9 +609,9 @@ void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 	rcu_read_lock();
 
 	tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = key_hashfn(ht, tbl, key, ht->p.key_len);
+	hash = key_hashfn(ht, tbl, key);
 restart:
-	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
+	rht_for_each_rcu(he, tbl, hash) {
 		if (!compare(rht_obj(ht, he), arg))
 			continue;
 		rcu_read_unlock();
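Taken together, the caller-side effect of the cleanups is that one value now serves both the bucket lock and the bucket array; the old two-step pattern (obj_raw_hashfn() followed by a separate rht_bucket_index() conversion) is gone. Below is a small self-contained model of the new calling pattern, with hypothetical stand-ins for the kernel's hash function, seed, and per-bucket lock array; it is an illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RESERVED_SHIFT	8	/* stand-in for HASH_RESERVED_SPACE */
#define TABLE_SIZE	16	/* number of buckets, a power of two */

/* Toy hash standing in for ht->p.hashfn (the kernel would use jhash). */
static uint32_t toy_hashfn(const void *key, uint32_t len, uint32_t seed)
{
	const unsigned char *p = key;
	uint32_t h = seed;

	while (len--)
		h = h * 31 + *p++;
	return h;
}

static uint32_t bucket_index(uint32_t hash)
{
	return (hash >> RESERVED_SHIFT) & (TABLE_SIZE - 1);
}

/* Post-cleanup shape of key_hashfn(): returns a bucket index directly. */
static uint32_t key_hashfn(const void *key, uint32_t len, uint32_t seed)
{
	return bucket_index(toy_hashfn(key, len, seed));
}

int main(void)
{
	const char *buckets[TABLE_SIZE] = { NULL };
	const char *key = "example";
	uint32_t hash = key_hashfn(key, (uint32_t)strlen(key), 0x12345678);

	/*
	 * The same value indexes the bucket array and, in the kernel,
	 * picks the per-bucket spinlock via bucket_lock(tbl, hash);
	 * no second conversion step is needed.
	 */
	buckets[hash] = key;
	printf("\"%s\" -> bucket %u of %u\n", key, hash, (unsigned)TABLE_SIZE);
	return 0;
}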