author		Daniel Borkmann <daniel@iogearbox.net>	2015-03-12 10:28:40 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-12 23:02:30 -0400
commit		a5b6846f9e1a080493210013385c28faecee36f0 (patch)
tree		6b068157f7e2efb5d71978d3a63d14754bcd6a7c /lib/rhashtable.c
parent		9497df88ab5567daa001829051c5f87161a81ff0 (diff)
rhashtable: kill ht->shift atomic operations
Commit c0c09bfdc415 ("rhashtable: avoid unnecessary wakeup for worker queue") changed ht->shift to be atomic, which is actually unnecessary.

Instead of leaving the current shift in the core rhashtable structure, it can be cached inside the individual bucket tables. There, it will only be initialized once during a new table allocation in the shrink/expansion slow path, and from then onward it stays immutable for the rest of the bucket table's lifetime. That allows shift to be non-atomic. The patch also moves hash_rnd management into the table setup. The rhashtable structure now consumes 3 instead of 4 cachelines.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Ying Xue <ying.xue@windriver.com>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
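The pattern the patch relies on can be shown in a short user-space sketch (a sketch only, not kernel code: the struct name, the ilog2 helper, and main() are illustrative assumptions). A bucket table is never resized in place -- growing or shrinking always allocates a fresh table -- so a shift written once at allocation stays immutable for that table's lifetime and readers need no atomic_read():

/* Minimal user-space sketch (not kernel code) of the idea in this patch:
 * since resizing always allocates a new table, a shift cached at
 * allocation time is immutable afterwards and can be read without atomics.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bucket_table_sketch {
	size_t   size;      /* number of buckets */
	uint32_t shift;     /* ilog2(size), written once below, then read-only */
	uint32_t hash_rnd;  /* hash seed, fixed for this table's lifetime */
};

/* stand-in for the kernel's ilog2() */
static uint32_t ilog2_u32(size_t n)
{
	uint32_t s = 0;

	while (n >>= 1)
		s++;
	return s;
}

static struct bucket_table_sketch *table_alloc(size_t nbuckets, uint32_t hash_rnd)
{
	struct bucket_table_sketch *tbl = malloc(sizeof(*tbl));

	if (!tbl)
		return NULL;
	tbl->size = nbuckets;
	tbl->shift = ilog2_u32(nbuckets);  /* set once, as in bucket_table_alloc() */
	tbl->hash_rnd = hash_rnd;          /* seed handed in by the caller */
	return tbl;
}

int main(void)
{
	struct bucket_table_sketch *tbl = table_alloc(64, 0xdeadbeef);

	if (!tbl)
		return 1;
	/* resize decisions can read tbl->shift directly, no atomic_read() */
	printf("size=%zu shift=%u\n", tbl->size, (unsigned)tbl->shift);
	free(tbl);
	return 0;
}

The same reasoning covers hash_rnd: passing it into the allocator pins it for the table's lifetime instead of patching it into the new table afterwards.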
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	55
1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 68210cc2bab8..adea791ea3ab 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -147,7 +147,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 }
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-					       size_t nbuckets)
+					       size_t nbuckets, u32 hash_rnd)
 {
 	struct bucket_table *tbl = NULL;
 	size_t size;
@@ -162,6 +162,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	tbl->size = nbuckets;
+	tbl->shift = ilog2(nbuckets);
+	tbl->hash_rnd = hash_rnd;
 
 	if (alloc_bucket_locks(ht, tbl) < 0) {
 		bucket_table_free(tbl);
@@ -177,25 +179,27 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 /**
  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
  * @ht:		hash table
- * @new_size:	new table size
+ * @tbl:	current table
  */
-static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht,
+			      const struct bucket_table *tbl)
 {
 	/* Expand table when exceeding 75% load */
-	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
+	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+	       (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
 }
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:		hash table
- * @new_size:	new table size
+ * @tbl:	current table
  */
-static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht,
+				const struct bucket_table *tbl)
 {
 	/* Shrink table beneath 30% load */
-	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
-	       (atomic_read(&ht->shift) > ht->p.min_shift);
+	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+	       tbl->shift > ht->p.min_shift;
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
@@ -310,16 +314,11 @@ int rhashtable_expand(struct rhashtable *ht)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, old_tbl->hash_rnd);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
-	new_tbl->hash_rnd = old_tbl->hash_rnd;
-
-	atomic_inc(&ht->shift);
-
 	rhashtable_rehash(ht, new_tbl);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_expand);
@@ -342,20 +341,15 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  */
 int rhashtable_shrink(struct rhashtable *ht)
 {
-	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 
 	ASSERT_RHT_MUTEX(ht);
 
-	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size / 2, old_tbl->hash_rnd);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
-	new_tbl->hash_rnd = tbl->hash_rnd;
-
-	atomic_dec(&ht->shift);
-
 	rhashtable_rehash(ht, new_tbl);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_shrink);
@@ -376,9 +370,9 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (rht_grow_above_75(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl))
 		rhashtable_expand(ht);
-	else if (rht_shrink_below_30(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl))
 		rhashtable_shrink(ht);
 unlock:
 	mutex_unlock(&ht->mutex);
@@ -431,7 +425,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+	if (no_resize_running && rht_grow_above_75(ht, tbl))
 		schedule_work(&ht->run_work);
 
 exit:
@@ -539,7 +533,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 		bool no_resize_running = tbl == old_tbl;
 
 		atomic_dec(&ht->nelems);
-		if (no_resize_running && rht_shrink_below_30(ht, tbl->size))
+		if (no_resize_running && rht_shrink_below_30(ht, tbl))
 			schedule_work(&ht->run_work);
 	}
 
@@ -913,6 +907,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 {
 	struct bucket_table *tbl;
 	size_t size;
+	u32 hash_rnd;
 
 	size = HASH_DEFAULT_SIZE;
 
@@ -939,14 +934,14 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	else
 		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
 
-	tbl = bucket_table_alloc(ht, size);
+	get_random_bytes(&hash_rnd, sizeof(hash_rnd));
+
+	tbl = bucket_table_alloc(ht, size, hash_rnd);
 	if (tbl == NULL)
 		return -ENOMEM;
 
-	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-
 	atomic_set(&ht->nelems, 0);
-	atomic_set(&ht->shift, ilog2(tbl->size));
+
 	RCU_INIT_POINTER(ht->tbl, tbl);
 	RCU_INIT_POINTER(ht->future_tbl, tbl);
 