Diffstat (limited to 'lib/rhashtable.c')
 lib/rhashtable.c | 160 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 102 insertions(+), 58 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde87c3c..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
-			      gfp_t gfp)
-{
-	unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
-	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
-	/* Never allocate more than 0.5 locks per bucket */
-	size = min_t(unsigned int, size, tbl->size >> 1);
-
-	if (tbl->nest)
-		size = min(size, 1U << tbl->nest);
-
-	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
-		if (!tbl->locks)
-			return -ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&tbl->locks[i]);
-	}
-	tbl->locks_mask = size - 1;
-
-	return 0;
-}
-
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
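
The sizing logic deleted above now lives behind alloc_bucket_spinlocks(), which presumably carries over the CPU-count scaling and caps the array at the caller-supplied maximum. A minimal sketch of how a caller drives the new pair of helpers, assuming only the signatures visible in this diff; struct my_table, my_table_init() and my_table_destroy() are illustrative names, not part of the patch:

	#include <linux/spinlock.h>

	/* Illustrative container mirroring tbl->locks/locks_mask above. */
	struct my_table {
		spinlock_t *locks;
		unsigned int locks_mask;
		unsigned int nbuckets;
	};

	static int my_table_init(struct my_table *t, unsigned int nbuckets,
				 gfp_t gfp)
	{
		/* Cap at one lock per two buckets, as bucket_table_alloc()
		 * does above.
		 */
		size_t max_locks = nbuckets >> 1;

		t->nbuckets = nbuckets;
		/* On success t->locks_mask is set, so bucket b is protected
		 * by t->locks[hash(b) & t->locks_mask].
		 */
		return alloc_bucket_spinlocks(&t->locks, &t->locks_mask,
					      max_locks, 1, gfp);
	}

	static void my_table_destroy(struct my_table *t)
	{
		free_bucket_spinlocks(t->locks);
	}
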
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
+	iter->end_of_table = 0;
 
 	spin_lock(&ht->lock);
 	iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
 /**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
  * @iter: Hash table iterator
  *
  * Start a hash table walk at the current iterator position. Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * Returns -EAGAIN if resize event occured. Note that the iterator
  * will rewind back to the beginning and you may use it immediately
  * by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
  */
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
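
The "inline variant" mentioned in the new comment is not part of this file; the companion header change presumably reads roughly as below. The checked walk that follows is a usage sketch under that assumption; process_obj() is an illustrative stand-in:

	#include <linux/rhashtable.h>
	#include <linux/err.h>
	#include <linux/printk.h>

	/* Presumed shape of the wrapper in include/linux/rhashtable.h:
	 * same entry point, return value deliberately discarded.
	 */
	static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
	{
		(void)rhashtable_walk_start_check(iter);
	}

	static void process_obj(void *obj) { /* illustrative */ }

	static void walk_all(struct rhashtable *ht)
	{
		struct rhashtable_iter iter;
		void *obj;

		rhashtable_walk_enter(ht, &iter);

		/* -EAGAIN only means the iterator was rewound to the start;
		 * per the comment above it stays usable immediately.
		 */
		if (rhashtable_walk_start_check(&iter) == -EAGAIN)
			pr_debug("rhashtable walk rewound by a resize\n");

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (obj == ERR_PTR(-EAGAIN))
				continue;	/* resize mid-walk: rewound */
			if (IS_ERR(obj))
				break;
			process_obj(obj);
		}

		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}
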
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl) {
+	if (!iter->walker.tbl && !iter->end_of_table) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
 
 /**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter: Hash table iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
  *
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * @iter: Hash table iterator
  *
- * Returns the next object or NULL when the end of the table is reached.
+ * Returns the found object or NULL when the end of the table is reached.
  *
- * Returns -EAGAIN if resize event occured. Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
  */
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
 {
 	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 	struct rhash_head *p = iter->p;
 	bool rhlist = ht->rhlist;
 
-	if (p) {
-		if (!rhlist || !(list = rcu_dereference(list->next))) {
-			p = rcu_dereference(p->next);
-			list = container_of(p, struct rhlist_head, rhead);
-		}
-		goto next;
-	}
+	if (!tbl)
+		return NULL;
 
 	for (; iter->slot < tbl->size; iter->slot++) {
 		int skip = iter->skip;
@@ -836,13 +803,90 @@ next:
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
+	} else {
+		iter->end_of_table = true;
 	}
 
 	return NULL;
 }
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	bool rhlist = ht->rhlist;
+
+	if (p) {
+		if (!rhlist || !(list = rcu_dereference(list->next))) {
+			p = rcu_dereference(p->next);
+			list = container_of(p, struct rhlist_head, rhead);
+		}
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			iter->list = list;
+			return rht_obj(ht, rhlist ? &list->rhead : p);
+		}
+
+		/* At the end of this slot, switch to next one and then find
+		 * next entry from that point.
+		 */
+		iter->skip = 0;
+		iter->slot++;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
 /**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+
+	if (p)
+		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+	/* No object found in current iter, find next one in the table. */
+
+	if (iter->skip) {
+		/* A nonzero skip value points to the next entry in the table
+		 * beyond that last one that was found. Decrement skip so
+		 * we find the current value. __rhashtable_walk_find_next
+		 * will restore the original value of skip assuming that
+		 * the table hasn't changed.
+		 */
+		iter->skip--;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter: Hash table iterator
  *
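
One way the new entry point presumably combines with rhashtable_walk_next() in practice: a dumper with limited output space can peek at an entry before committing to consume it, so an entry that does not fit is seen again on the next pass instead of being lost. A sketch under that assumption; fits_in_buffer() and emit() are illustrative stand-ins:

	#include <linux/rhashtable.h>
	#include <linux/err.h>

	static bool fits_in_buffer(const void *obj) { return true; } /* illustrative */
	static void emit(const void *obj) { }                        /* illustrative */

	/* Dump entries until the buffer fills. Because the peeked entry is
	 * only consumed once emitted, a partial dump can resume exactly
	 * where it stopped.
	 */
	static void dump_some(struct rhashtable_iter *iter)
	{
		void *obj;

		rhashtable_walk_start(iter);

		while ((obj = rhashtable_walk_peek(iter)) != NULL) {
			if (obj == ERR_PTR(-EAGAIN))
				continue;	/* resize: iterator rewound */
			if (IS_ERR(obj))
				break;
			if (!fits_in_buffer(obj))
				break;		/* re-peeked on the next call */
			emit(obj);
			rhashtable_walk_next(iter); /* advance past the peeked entry */
		}

		rhashtable_walk_stop(iter);
	}
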