aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorYing Xue <ying.xue@windriver.com>2015-01-07 00:41:53 -0500
committerDavid S. Miller <davem@davemloft.net>2015-01-08 22:47:10 -0500
commit54c5b7d311c8e1801f9dcce9f388a7420a25fa90 (patch)
tree9bbd313b7a6676fb1752abf7dd3fe736c18114bc /lib
parentefb975a67ea7846b966080f999589de570686aa0 (diff)
rhashtable: introduce rhashtable_wakeup_worker helper function
Introduce the rhashtable_wakeup_worker() helper function to reduce duplicated code where the worker is woken up. Additionally, as long as both the "future_tbl" and "tbl" bucket table pointers point to the same bucket array, we should try to wake up the resizing worker thread; otherwise, it indicates that the work of resizing the hash table is not yet finished. However, currently we wake up the worker thread only when the two pointers point to different bucket arrays, which is obviously wrong. So that issue is fixed in this patch as well. Signed-off-by: Ying Xue <ying.xue@windriver.com> Cc: Thomas Graf <tgraf@suug.ch> Acked-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r--lib/rhashtable.c23
1 file changed, 15 insertions, 8 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f2fdd7a7cb16..20006854fce0 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -492,6 +492,19 @@ static void rht_deferred_worker(struct work_struct *work)
492 mutex_unlock(&ht->mutex); 492 mutex_unlock(&ht->mutex);
493} 493}
494 494
495static void rhashtable_wakeup_worker(struct rhashtable *ht)
496{
497 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
498 struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
499 size_t size = tbl->size;
500
501 /* Only adjust the table if no resizing is currently in progress. */
502 if (tbl == new_tbl &&
503 ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
504 (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
505 schedule_delayed_work(&ht->run_work, 0);
506}
507
495/** 508/**
496 * rhashtable_insert - insert object into hash hash table 509 * rhashtable_insert - insert object into hash hash table
497 * @ht: hash table 510 * @ht: hash table
@@ -532,10 +545,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
532 545
533 atomic_inc(&ht->nelems); 546 atomic_inc(&ht->nelems);
534 547
535 /* Only grow the table if no resizing is currently in progress. */ 548 rhashtable_wakeup_worker(ht);
536 if (ht->tbl != ht->future_tbl &&
537 ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
538 schedule_delayed_work(&ht->run_work, 0);
539 549
540 rcu_read_unlock(); 550 rcu_read_unlock();
541} 551}
@@ -584,10 +594,7 @@ restart:
584 594
585 spin_unlock_bh(lock); 595 spin_unlock_bh(lock);
586 596
587 if (ht->tbl != ht->future_tbl && 597 rhashtable_wakeup_worker(ht);
588 ht->p.shrink_decision &&
589 ht->p.shrink_decision(ht, tbl->size))
590 schedule_delayed_work(&ht->run_work, 0);
591 598
592 rcu_read_unlock(); 599 rcu_read_unlock();
593 600