author	Herbert Xu <herbert@gondor.apana.org.au>	2015-03-11 23:49:39 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-12 14:35:30 -0400
commit	eca849333017cab1cd02c8fc9187962fa629b27d (patch)
tree	c3e7ef12bc272f002b448e5660c2ab849e9e689a /lib/rhashtable.c
parent	8d2b18793d16e4186f00b07d031a25537c4cefb9 (diff)
rhashtable: Use head_hashfn instead of obj_raw_hashfn
Now that we don't have cross-table hashes, we no longer need to keep the
entire hash value, so all users of obj_raw_hashfn can use head_hashfn
instead.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
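The sketch below is illustrative only and is not the kernel's code: obj_raw_hashfn(), head_hashfn() and rht_bucket_index() are modelled by hypothetical *_sketch() stand-ins with made-up bodies. It shows the relationship the commit message relies on: the raw hash keeps the full 32-bit value, whereas the head hash is assumed to already be reduced to a bucket index, which is why the explicit rht_bucket_index() step in the insert and remove paths becomes redundant.

/*
 * Sketch only -- simplified stand-ins for the rhashtable helpers, not
 * the kernel's actual definitions.  Names suffixed with _sketch are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* assumed: full 32-bit hash of the object's key */
static uint32_t obj_raw_hashfn_sketch(uint32_t key)
{
	return key * 2654435761u;	/* toy multiplicative hash */
}

/* assumed: mask the full hash down to a bucket index (table size is a power of two) */
static uint32_t rht_bucket_index_sketch(uint32_t hash, uint32_t nbuckets)
{
	return hash & (nbuckets - 1);
}

/* assumed: head_hashfn() == raw hash already reduced to a bucket index */
static uint32_t head_hashfn_sketch(uint32_t key, uint32_t nbuckets)
{
	return rht_bucket_index_sketch(obj_raw_hashfn_sketch(key), nbuckets);
}

int main(void)
{
	uint32_t key = 42, nbuckets = 64;

	/* old pattern: raw hash first, then an explicit masking step */
	uint32_t hash = obj_raw_hashfn_sketch(key);
	uint32_t idx_old = rht_bucket_index_sketch(hash, nbuckets);

	/* new pattern: one call yields the bucket index directly */
	uint32_t idx_new = head_hashfn_sketch(key, nbuckets);

	printf("old=%u new=%u\n", idx_old, idx_new);	/* identical indices */
	return 0;
}

In other words, under these assumptions a single head_hashfn() call produces a value usable both for bucket_lock() and for indexing tbl->buckets[], so no caller needs to hold on to the raw hash.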
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	12
1 file changed, 5 insertions, 7 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ff9cc3386fc9..03fdaf869c4d 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -403,7 +403,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_read_lock();
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = obj_raw_hashfn(ht, old_tbl, rht_obj(ht, obj));
+	hash = head_hashfn(ht, old_tbl, obj);
 
 	spin_lock_bh(bucket_lock(old_tbl, hash));
 
@@ -415,7 +415,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	 */
 	tbl = rht_dereference_rcu(ht->future_tbl, ht);
 	if (tbl != old_tbl) {
-		hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj));
+		hash = head_hashfn(ht, tbl, obj);
 		spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED);
 	}
 
@@ -428,7 +428,6 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 	no_resize_running = tbl == old_tbl;
 
-	hash = rht_bucket_index(tbl, hash);
 	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
 	if (rht_is_a_nulls(head))
@@ -444,11 +443,11 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 exit:
 	if (tbl != old_tbl) {
-		hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj));
+		hash = head_hashfn(ht, tbl, obj);
 		spin_unlock(bucket_lock(tbl, hash));
 	}
 
-	hash = obj_raw_hashfn(ht, old_tbl, rht_obj(ht, obj));
+	hash = head_hashfn(ht, old_tbl, obj);
 	spin_unlock_bh(bucket_lock(old_tbl, hash));
 
 	rcu_read_unlock();
@@ -487,9 +486,8 @@ static bool __rhashtable_remove(struct rhashtable *ht,
 	unsigned hash;
 	bool ret = false;
 
-	hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj));
+	hash = head_hashfn(ht, tbl, obj);
 	lock = bucket_lock(tbl, hash);
-	hash = rht_bucket_index(tbl, hash);
 
 	spin_lock_bh(lock);
 