author     Daniel Borkmann <daniel@iogearbox.net>    2015-02-19 18:53:37 -0500
committer  David S. Miller <davem@davemloft.net>     2015-02-20 17:38:09 -0500
commit     342100d937ed6e5faf1e7ee7dcd7b3935fec8877 (patch)
tree       4d3f668296689371fb6825344cfa02faf40cc5be /lib
parent     ee92259849b1616e4c23121f78eb1342d2b1ce1e (diff)
rhashtable: don't test for shrink on insert, expansion on delete
Restore pre 54c5b7d311c8 behaviour and only probe for expansions on inserts and shrinks on deletes. Currently, on initial inserts into a sparse hash table, we may e.g. shrink it first simply because it is not fully populated yet, only to realize later that we need to grow again. This, however, is counterintuitive: an initial default size of 64 elements is already small enough, and when a user gives the hash table an element count hint, we should avoid unnecessary expansion steps, so a shrink is clearly unintended here.

Fixes: 54c5b7d311c8 ("rhashtable: introduce rhashtable_wakeup_worker helper function")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Ying Xue <ying.xue@windriver.com>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
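To see why the old combined check misfires, consider a minimal userspace sketch (plain C, not kernel code). The 75%/30% load-factor thresholds below mirror the kernel's stock rht_grow_above_75()/rht_shrink_below_30() policies, but grow_decision() and shrink_decision() here are illustrative stand-ins; the real shrink helper also enforces a minimum table size, which is omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the stock decision helpers:
 * grow above 75% load, shrink below 30% load. */
static bool grow_decision(size_t nelems, size_t size)
{
	return nelems > size / 4 * 3;
}

static bool shrink_decision(size_t nelems, size_t size)
{
	return nelems * 10 < size * 3;
}

int main(void)
{
	size_t size = 64;   /* initial default table size */
	size_t nelems = 1;  /* first insert into a sparse table */

	/* Pre-patch: an insert probed BOTH decisions, so the very
	 * first insert already satisfies the shrink predicate. */
	if (grow_decision(nelems, size) || shrink_decision(nelems, size))
		printf("old logic: resize worker woken (a shrink!) at %zu/%zu\n",
		       nelems, size);

	/* Post-patch: an insert probes only the grow decision. */
	if (grow_decision(nelems, size))
		printf("new logic: grow scheduled\n");
	else
		printf("new logic: no resize at %zu/%zu\n", nelems, size);

	return 0;
}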
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c | 27 ++++++++++++++++++---------
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a90d00..38f7879df0d8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -537,16 +537,25 @@ unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
+static void rhashtable_probe_expand(struct rhashtable *ht)
 {
-	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	size_t size = tbl->size;
+	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 
 	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl &&
-	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
+	if (tbl == new_tbl && ht->p.grow_decision &&
+	    ht->p.grow_decision(ht, tbl->size))
+		schedule_work(&ht->run_work);
+}
+
+static void rhashtable_probe_shrink(struct rhashtable *ht)
+{
+	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+	/* Only adjust the table if no resizing is currently in progress. */
+	if (tbl == new_tbl && ht->p.shrink_decision &&
+	    ht->p.shrink_decision(ht, tbl->size))
 		schedule_work(&ht->run_work);
 }
 
@@ -569,7 +578,7 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 	atomic_inc(&ht->nelems);
 
-	rhashtable_wakeup_worker(ht);
+	rhashtable_probe_expand(ht);
 }
 
 /**
@@ -682,7 +691,7 @@ found:
 
 	if (ret) {
 		atomic_dec(&ht->nelems);
-		rhashtable_wakeup_worker(ht);
+		rhashtable_probe_shrink(ht);
 	}
 
 	rcu_read_unlock();
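For context on where the grow/shrink decisions come from: a caller opts into automatic resizing by supplying these hooks at init time. Below is a hedged sketch against the 2015-era API; the .grow_decision/.shrink_decision fields are visible in the patch above, while my_obj, my_table_init() and the pairing with rht_grow_above_75()/rht_shrink_below_30() are assumptions modelled on contemporaneous in-tree users, not part of this commit:

#include <linux/rhashtable.h>
#include <linux/jhash.h>

/* Hypothetical hashed object; field names are illustrative. */
struct my_obj {
	int key;
	struct rhash_head node;
};

static struct rhashtable my_ht;

static int my_table_init(void)
{
	struct rhashtable_params params = {
		.head_offset = offsetof(struct my_obj, node),
		.key_offset  = offsetof(struct my_obj, key),
		.key_len     = sizeof(int),
		.hashfn      = jhash,
		/* Stock load-factor policies; after this patch, the grow
		 * hook is probed only on insert and the shrink hook only
		 * on delete. */
		.grow_decision   = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	return rhashtable_init(&my_ht, &params);
}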