author     Tom Herbert <tom@quantonium.net>       2017-12-04 13:31:45 -0500
committer  David S. Miller <davem@davemloft.net>  2017-12-11 09:58:39 -0500
commit     64e0cd0d3540dbbdf6661943025409e6b31d5178 (patch)
tree       98769c3324dd57bc2070c08852c9bf1f272ba2b8
parent     92f36cca5773cbaa78c46ccf49503964a52da294 (diff)
rhashtable: Call library function alloc_bucket_spinlocks
To allocate the array of bucket locks for the hash table, we now call the
library function alloc_bucket_spinlocks. This function is based on the old
alloc_bucket_locks in rhashtable and should produce the same effect.

Signed-off-by: Tom Herbert <tom@quantonium.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  lib/rhashtable.c  47
1 file changed, 8 insertions(+), 39 deletions(-)
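Note: this patch only shows the rhashtable side; the new helper itself is added separately, presumably under lib/. Below is a minimal sketch of what such an allocator could look like, reusing the sizing and allocation logic of the removed alloc_bucket_locks() and a signature inferred from the alloc_bucket_spinlocks()/free_bucket_spinlocks() call sites in the diff (parameter names locks, locks_mask, max_size and cpu_mult are illustrative assumptions; the in-tree implementation may differ):

/*
 * Sketch only, not the in-tree code. The signature is inferred from the
 * call sites in this patch; the body mirrors the removed
 * alloc_bucket_locks() so it "should produce the same effect".
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
{
	spinlock_t *tlocks = NULL;
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	/*
	 * Same sizing policy as the removed alloc_bucket_locks(): scale with
	 * the number of possible CPUs times the caller's multiplier, rounded
	 * up to a power of two, but never exceed the caller's cap (which the
	 * rhashtable caller passes as a power of two).
	 */
	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = min_t(unsigned int,
		     roundup_pow_of_two(nr_pcpus * cpu_mult), max_size);

	if (sizeof(spinlock_t) != 0) {
		if (gfpflags_allow_blocking(gfp))
			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
		else
			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
		if (!tlocks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tlocks[i]);
	}

	*locks = tlocks;
	*locks_mask = size - 1;

	return 0;
}

void free_bucket_spinlocks(spinlock_t *locks)
{
	kvfree(locks);
}

With a split along these lines, bucket_table_alloc() only computes the cap (max_locks) and passes the per-bucket-table multiplier and GFP flags to the library, and bucket_table_free() releases the array via free_bucket_spinlocks() instead of calling kvfree() directly.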
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6fc52d82efe6..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
-			      gfp_t gfp)
-{
-	unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
-	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
-	/* Never allocate more than 0.5 locks per bucket */
-	size = min_t(unsigned int, size, tbl->size >> 1);
-
-	if (tbl->nest)
-		size = min(size, 1U << tbl->nest);
-
-	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
-		if (!tbl->locks)
-			return -ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&tbl->locks[i]);
-	}
-	tbl->locks_mask = size - 1;
-
-	return 0;
-}
-
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}