author		Ying Xue <ying.xue@windriver.com>	2015-01-07 00:41:54 -0500
committer	David S. Miller <davem@davemloft.net>	2015-01-08 22:47:11 -0500
commit		db30485408326a6f466a843b291b23535f63eda0 (patch)
tree		42d8c8e2b515c03f6c054a27e6d8b9aa9c634284 /lib/rhashtable.c
parent		54c5b7d311c8e1801f9dcce9f388a7420a25fa90 (diff)
rhashtable: introduce rhashtable_lookup_insert routine
Introduce a new function called rhashtable_lookup_insert() which makes lookup and insertion atomic under bucket lock protection, helping us avoid introducing an extra lock when we search for and insert an object into the hash table.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
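As a usage sketch (not part of this patch): with a table whose key_len is set, as rhashtable_lookup_insert() requires, insert-if-absent needs no extra caller-side lock and may be called from atomic context. The sketch below assumes the rhashtable API as of this patch (params-struct init with hashfn/grow_decision callbacks); all demo_* names are hypothetical, loosely modelled on existing users such as af_netlink.

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/stddef.h>
	#include <linux/jhash.h>
	#include <linux/rhashtable.h>

	/* Hypothetical object type; 'node' chains it into a bucket list. */
	struct demo_obj {
		u32 key;
		struct rhash_head node;
	};

	static struct rhashtable demo_ht;

	static struct rhashtable_params demo_params = {
		.nelem_hint	= 16,
		.head_offset	= offsetof(struct demo_obj, node),
		.key_offset	= offsetof(struct demo_obj, key),
		.key_len	= sizeof(u32),	/* fixed key length is mandatory */
		.hashfn		= jhash,
		.grow_decision	= rht_grow_above_75,
	};

	static int __init demo_init(void)
	{
		return rhashtable_init(&demo_ht, &demo_params);
	}

	/* Insert obj unless an entry with the same key already exists.
	 * Lookup and insertion are atomic under the bucket lock(s). */
	static int demo_insert_unique(struct demo_obj *obj)
	{
		if (!rhashtable_lookup_insert(&demo_ht, &obj->node))
			return -EEXIST;	/* caller still owns obj */
		return 0;
	}

On a false return the caller still owns the object and typically frees it or falls back to the existing entry; during a resize the duplicate search covers both the old and the new table, so uniqueness holds across expansion.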
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	97
1 file changed, 82 insertions(+), 15 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 20006854fce0..4430233c4e11 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -505,8 +505,26 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 		schedule_delayed_work(&ht->run_work, 0);
 }
 
+static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+				struct bucket_table *tbl, u32 hash)
+{
+	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
+							 tbl, hash);
+
+	if (rht_is_a_nulls(head))
+		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
+	else
+		RCU_INIT_POINTER(obj->next, head);
+
+	rcu_assign_pointer(tbl->buckets[hash], obj);
+
+	atomic_inc(&ht->nelems);
+
+	rhashtable_wakeup_worker(ht);
+}
+
 /**
- * rhashtable_insert - insert object into hash hash table
+ * rhashtable_insert - insert object into hash table
  * @ht:		hash table
  * @obj:	pointer to hash head inside object
  *
@@ -523,7 +541,6 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht)
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl;
-	struct rhash_head *head;
 	spinlock_t *lock;
 	unsigned hash;
 
@@ -534,19 +551,9 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	lock = bucket_lock(tbl, hash);
 
 	spin_lock_bh(lock);
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-	else
-		RCU_INIT_POINTER(obj->next, head);
-
-	rcu_assign_pointer(tbl->buckets[hash], obj);
+	__rhashtable_insert(ht, obj, tbl, hash);
 	spin_unlock_bh(lock);
 
-	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
-
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
@@ -560,7 +567,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
  * walk the bucket chain upon removal. The removal operation is thus
  * considerable slow if the hash table is not correctly sized.
  *
- * Will automatically shrink the table via rhashtable_expand() if the the
+ * Will automatically shrink the table via rhashtable_expand() if the
  * shrink_decision function specified at rhashtable_init() returns true.
  *
  * The caller must ensure that no concurrent table mutations occur. It is
@@ -641,7 +648,7 @@ static bool rhashtable_compare(void *ptr, void *arg)
  * for a entry with an identical key. The first matching entry is returned.
  *
  * This lookup function may only be used for fixed key hash table (key_len
- * paramter set). It will BUG() if used inappropriately.
+ * parameter set). It will BUG() if used inappropriately.
  *
  * Lookups may occur in parallel with hashtable mutations and resizing.
  */
@@ -702,6 +709,66 @@ restart:
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
+/**
+ * rhashtable_lookup_insert - lookup and insert object into hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
+{
+	struct bucket_table *new_tbl, *old_tbl;
+	spinlock_t *new_bucket_lock, *old_bucket_lock;
+	u32 new_hash, old_hash;
+	bool success = true;
+
+	BUG_ON(!ht->p.key_len);
+
+	rcu_read_lock();
+
+	old_tbl = rht_dereference_rcu(ht->tbl, ht);
+	old_hash = head_hashfn(ht, old_tbl, obj);
+	old_bucket_lock = bucket_lock(old_tbl, old_hash);
+	spin_lock_bh(old_bucket_lock);
+
+	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	new_hash = head_hashfn(ht, new_tbl, obj);
+	new_bucket_lock = bucket_lock(new_tbl, new_hash);
+	if (unlikely(old_tbl != new_tbl))
+		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+
+	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
+		success = false;
+		goto exit;
+	}
+
+	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+
+exit:
+	if (unlikely(old_tbl != new_tbl))
+		spin_unlock_bh(new_bucket_lock);
+	spin_unlock_bh(old_bucket_lock);
+
+	rcu_read_unlock();
+
+	return success;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),