author    Daniel Borkmann <daniel@iogearbox.net>    2015-02-25 10:31:54 -0500
committer David S. Miller <davem@davemloft.net>     2015-02-27 16:06:02 -0500
commit    4c4b52d9b2df45e8216d3e30b5452e4a364d2cac (patch)
tree      3e4d9c6941729846c3c55c7f256197321311b3aa /lib
parent    8331de75cb13fc907ceba78e698c42150e61dda9 (diff)
rhashtable: remove indirection for grow/shrink decision functions
Currently, all real users of rhashtable default their grow and shrink
decision functions to rht_grow_above_75() and rht_shrink_below_30(),
so there is no need to have this explicitly selectable. It can/should
be generic and private inside rhashtable until a real use case pops up.
Since we can make this private, we save ourselves the additional
indirection layer and can improve insertion/deletion time as well.

Reference: http://patchwork.ozlabs.org/patch/443040/
Suggested-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
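To make the removed indirection concrete, here is a minimal userspace
sketch (simplified stand-in types, not the kernel code itself)
contrasting the old function-pointer policy in the params with the
direct static call this patch switches to; grow_above_75() mirrors the
75% arithmetic in the patch below:

/* Minimal userspace sketch (not the kernel code): the indirection
 * removed by this patch, with simplified stand-in types. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ht { size_t nelems; };	/* stand-in for struct rhashtable */

/* Old scheme: policy as a function pointer in the params, which had
 * to be NULL-checked and dereferenced on every insert/delete. */
typedef bool (*decision_fn)(const struct ht *ht, size_t new_size);

/* Same 75% arithmetic as rht_grow_above_75() below. */
static bool grow_above_75(const struct ht *ht, size_t new_size)
{
	return ht->nelems > (new_size / 4 * 3);
}

static bool check_old(const struct ht *ht, size_t size, decision_fn grow)
{
	return grow && grow(ht, size);	/* NULL check + indirect call */
}

/* New scheme: private policy called directly, inlinable. */
static bool check_new(const struct ht *ht, size_t size)
{
	return grow_above_75(ht, size);
}

int main(void)
{
	struct ht ht = { .nelems = 49 };	/* 49 > 75% of 64 */

	printf("old: %d new: %d\n",
	       check_old(&ht, 64, grow_above_75), check_new(&ht, 64));
	return 0;
}

Dropping the NULL check and the indirect branch on every insertion and
deletion is where the insertion/deletion improvement mentioned above
comes from.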
Diffstat (limited to 'lib')
 lib/rhashtable.c      | 56 ++++++++++-----------------------
 lib/test_rhashtable.c |  1 +
 2 files changed, 18 insertions(+), 39 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index bcf119bfdef4..090641db4c0d 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -247,26 +247,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht: hash table
  * @new_size: new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
 	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht: hash table
  * @new_size: new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
 	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
 			 struct bucket_table *old_tbl, unsigned int hash)
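As a concrete reading of the two predicates above, note that the
arithmetic is integer division; a quick standalone check (hypothetical
values, just illustrating the thresholds):

/* Thresholds for a table of size 64, using the same integer math. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t new_size = 64;

	assert(new_size / 4 * 3 == 48);		/* grow once nelems > 48 (75%) */
	assert(new_size * 3 / 10 == 19);	/* shrink once nelems < 19 (30%) */
	return 0;
}

The additional max_shift/min_shift conditions simply bound how far the
table is allowed to grow or shrink.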
@@ -528,40 +526,19 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl->size))
 		rhashtable_expand(ht);
-	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl->size))
 		rhashtable_shrink(ht);
-
 unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_probe_expand(struct rhashtable *ht)
-{
-	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl && ht->p.grow_decision &&
-	    ht->p.grow_decision(ht, tbl->size))
-		schedule_work(&ht->run_work);
-}
-
-static void rhashtable_probe_shrink(struct rhashtable *ht)
-{
-	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl && ht->p.shrink_decision &&
-	    ht->p.shrink_decision(ht, tbl->size))
-		schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				struct bucket_table *tbl, u32 hash)
+				struct bucket_table *tbl,
+				const struct bucket_table *old_tbl, u32 hash)
 {
+	bool no_resize_running = tbl == old_tbl;
 	struct rhash_head *head;
 
 	hash = rht_bucket_index(tbl, hash);
@@ -577,8 +554,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-
-	rhashtable_probe_expand(ht);
+	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+		schedule_work(&ht->run_work);
 }
 
 /**
@@ -608,7 +585,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(tbl, old_tbl, hash);
-	__rhashtable_insert(ht, obj, tbl, hash);
+	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
 	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
@@ -690,8 +667,11 @@ found:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
+		bool no_resize_running = new_tbl == old_tbl;
+
 		atomic_dec(&ht->nelems);
-		rhashtable_probe_shrink(ht);
+		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+			schedule_work(&ht->run_work);
 	}
 
 	rcu_read_unlock();
@@ -861,7 +841,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 		goto exit;
 	}
 
-	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -1123,8 +1103,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (!ht->p.hash_rnd)
 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		INIT_WORK(&ht->run_work, rht_deferred_worker);
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
 
 	return 0;
 }
@@ -1142,8 +1121,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
 	ht->being_destroyed = true;
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		cancel_work_sync(&ht->run_work);
+	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
 	bucket_table_free(rht_dereference(ht->tbl, ht));
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f9e9d734446a..67c7593d1dd6 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -201,6 +201,7 @@ static int __init test_rht_init(void)
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = jhash,
+		.max_shift = 1, /* we expand/shrink manually here */
 		.nulls_base = (3U << RHT_BASE_SHIFT),
 	};
 	int err;
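Finally, a self-contained sketch of the insert-path behavior after this
patch, for readers following the __rhashtable_insert() hunks above:
deferred resize work is only queued when no resize is already running
(tbl == old_tbl) and the load crosses 75%. The types and the
maybe_schedule_resize() helper are illustrative stand-ins, not kernel
API:

/* Userspace sketch of the insert-path policy after this patch.
 * The return value stands in for schedule_work(&ht->run_work). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct tbl { size_t size; };
struct ht  { size_t nelems; };

static bool grow_above_75(const struct ht *ht, size_t new_size)
{
	return ht->nelems > (new_size / 4 * 3);
}

/* Mirrors the tail of __rhashtable_insert() after the patch. */
static bool maybe_schedule_resize(struct ht *ht,
				  const struct tbl *tbl,
				  const struct tbl *old_tbl)
{
	bool no_resize_running = tbl == old_tbl;

	ht->nelems++;
	return no_resize_running && grow_above_75(ht, tbl->size);
}

int main(void)
{
	struct tbl t = { .size = 64 };
	struct ht  h = { .nelems = 48 };

	/* The 49th element crosses 75% of 64: resize work is queued. */
	printf("schedule resize: %d\n", maybe_schedule_resize(&h, &t, &t));
	return 0;
}

In the kernel, the queued work runs rht_deferred_worker(), which then
re-evaluates both thresholds under ht->mutex, as the first
lib/rhashtable.c hunk above shows.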