author	Herbert Xu <herbert@gondor.apana.org.au>	2015-03-20 06:57:00 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-20 16:16:24 -0400
commit	02fd97c3d4a8a14e222b0021c366db7041d28743 (patch)
tree	5bce8d1e7f57aefa89073934042d2907408fd403
parent	488fb86ee91d3b1182c2e30a9f9b45da14eda46f (diff)
rhashtable: Allow hash/comparison functions to be inlined
This patch deals with the complaint that we make indirect function calls on the fast paths unnecessarily in rhashtable. We resolve it by moving the fast paths into inline functions that take struct rhashtable_params (which obviously must be the same set of parameters supplied to rhashtable_init) as an argument. The only remaining indirect call is to obj_hashfn (or key_hashfn if obj_hashfn is unset) on the rehash as well as the insert-during-rehash slow path.

This patch also extends the support of variable-length keys to include those where the key is fixed but scattered in the object. For example, in netlink we want to key off the namespace and the portid, but they're not next to each other.

This patch does this by directly using the object hash function as the indicator of whether the key is accessible or not. It also adds a new function obj_cmpfn to compare a key against an object. This means that the caller no longer needs to supply explicit compare functions.

All this is done in a backwards-compatible manner, so no existing users are affected until they convert to the new interface.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
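To illustrate what converting to the new interface looks like, here is a minimal sketch (not part of the patch: struct my_obj, my_params and my_lookup are hypothetical names, and jhash is just one possible hashfn):

#include <linux/jhash.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

/* A file-scope constant: because the fast paths take the parameters
 * by value, the compiler sees .hashfn at compile time and can emit a
 * direct, inlinable call instead of an indirect one through ht->p.
 */
static const struct rhashtable_params my_params = {
	.head_offset	= offsetof(struct my_obj, node),
	.key_offset	= offsetof(struct my_obj, key),
	.key_len	= sizeof(u32),
	.hashfn		= jhash,
};

static inline struct my_obj *my_lookup(struct rhashtable *ht, const u32 *key)
{
	return rhashtable_lookup_fast(ht, key, my_params);
}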
-rw-r--r--	include/linux/rhashtable.h	386
-rw-r--r--	lib/rhashtable.c	163
2 files changed, 436 insertions, 113 deletions
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index c85363c45fc0..a7188eeb135b 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -22,6 +22,7 @@
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 
 /*
  * The end of the chain is marked with a special nulls marks which has
@@ -42,6 +43,9 @@
 #define RHT_HASH_BITS		27
 #define RHT_BASE_SHIFT		RHT_HASH_BITS
 
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
+
 struct rhash_head {
 	struct rhash_head __rcu		*next;
 };
@@ -72,8 +76,20 @@ struct bucket_table {
 	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
 };
 
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+	struct rhashtable *ht;
+	const void *key;
+};
+
 typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
 typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+			       const void *obj);
 
 struct rhashtable;
 
@@ -89,6 +105,7 @@ struct rhashtable;
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
  */
 struct rhashtable_params {
 	size_t			nelem_hint;
@@ -101,6 +118,7 @@ struct rhashtable_params {
 	size_t			locks_mul;
 	rht_hashfn_t		hashfn;
 	rht_obj_hashfn_t	obj_hashfn;
+	rht_obj_cmpfn_t		obj_cmpfn;
 };
 
 /**
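For the scattered-key case described in the commit message, a user might supply the new obj_cmpfn alongside obj_hashfn roughly as follows (a hypothetical sketch, not from the patch: my_sock, my_key and the helper names are invented, and hashfn and obj_hashfn must hash the same logical key identically):

#include <linux/jhash.h>
#include <linux/rhashtable.h>

struct my_sock {
	u32			portid;	/* first half of the key */
	struct rhash_head	node;
	u32			ns_id;	/* second half; not adjacent */
};

struct my_key {
	u32 portid;
	u32 ns_id;
};

static u32 my_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct my_key *key = data;

	return jhash_2words(key->portid, key->ns_id, seed);
}

static u32 my_obj_hashfn(const void *data, u32 seed)
{
	const struct my_sock *s = data;

	/* Must produce the same hash as my_key_hashfn for one logical key. */
	return jhash_2words(s->portid, s->ns_id, seed);
}

static int my_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct my_key *key = arg->key;
	const struct my_sock *s = obj;

	/* Returns 0 on a match, mirroring memcmp() semantics. */
	return s->portid != key->portid || s->ns_id != key->ns_id;
}

static const struct rhashtable_params my_sock_params = {
	.head_offset	= offsetof(struct my_sock, node),
	.hashfn		= my_key_hashfn,
	.obj_hashfn	= my_obj_hashfn,
	.obj_cmpfn	= my_obj_cmpfn,
};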
@@ -165,6 +183,83 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
 	return ((unsigned long) ptr) >> 1;
 }
 
+static inline void *rht_obj(const struct rhashtable *ht,
+			    const struct rhash_head *he)
+{
+	return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+					    unsigned int hash)
+{
+	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+	struct rhashtable *ht, const struct bucket_table *tbl,
+	const void *key, const struct rhashtable_params params)
+{
+	return rht_bucket_index(tbl, params.hashfn(key, params.key_len ?:
+						   ht->p.key_len,
+						   tbl->hash_rnd));
+}
+
+static inline unsigned int rht_head_hashfn(
+	struct rhashtable *ht, const struct bucket_table *tbl,
+	const struct rhash_head *he, const struct rhashtable_params params)
+{
+	const char *ptr = rht_obj(ht, he);
+
+	return likely(params.obj_hashfn) ?
+	       rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
+	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht:		hash table
+ * @tbl:	current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+				     const struct bucket_table *tbl)
+{
+	/* Expand table when exceeding 75% load */
+	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+	       (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht:		hash table
+ * @tbl:	current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+				       const struct bucket_table *tbl)
+{
+	/* Shrink table beneath 30% load */
+	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+	       tbl->size > ht->p.min_size;
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contain
+ * entries which link to the same bucket of the old table during resizing.
+ * This simplifies the locking, as locking the bucket in both
+ * tables during resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+					  unsigned int hash)
+{
+	return &tbl->locks[hash & tbl->locks_mask];
+}
+
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
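A worked example of the two watermarks defined above (illustrative numbers of my own, not from the patch):

/* For tbl->size == 64:
 *   grow   when nelems > 64 / 4 * 3 == 48   (load above 75%)
 *   shrink when nelems < 64 * 3 / 10 == 19  (load below ~30%)
 * in both cases subject to the max_size/min_size parameters.
 */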
@@ -184,6 +279,9 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
 int rhashtable_init(struct rhashtable *ht,
 		    const struct rhashtable_params *params);
 
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+			   struct rhash_head *obj,
+			   struct bucket_table *old_tbl);
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
@@ -356,4 +454,292 @@ void rhashtable_destroy(struct rhashtable *ht);
 	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
 					tbl, hash, member)
 
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+				     const void *obj)
+{
+	struct rhashtable *ht = arg->ht;
+	const char *ptr = obj;
+
+	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht:		hash table
+ * @key:	the pointer to the key
+ * @params:	hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+	struct rhashtable *ht, const void *key,
+	const struct rhashtable_params params)
+{
+	struct rhashtable_compare_arg arg = {
+		.ht = ht,
+		.key = key,
+	};
+	const struct bucket_table *tbl;
+	struct rhash_head *he;
+	unsigned hash;
+
+	rcu_read_lock();
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+	hash = rht_key_hashfn(ht, tbl, key, params);
+	rht_for_each_rcu(he, tbl, hash) {
+		if (params.obj_cmpfn ?
+		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+		    rhashtable_compare(&arg, rht_obj(ht, he)))
+			continue;
+		rcu_read_unlock();
+		return rht_obj(ht, he);
+	}
+
+	/* Ensure we see any new tables. */
+	smp_rmb();
+
+	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (unlikely(tbl))
+		goto restart;
+	rcu_read_unlock();
+
+	return NULL;
+}
+
+static inline int __rhashtable_insert_fast(
+	struct rhashtable *ht, const void *key, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
+	struct rhashtable_compare_arg arg = {
+		.ht = ht,
+		.key = key,
+	};
+	int err = -EEXIST;
+	struct bucket_table *tbl, *new_tbl;
+	struct rhash_head *head;
+	spinlock_t *lock;
+	unsigned hash;
+
+	rcu_read_lock();
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+	hash = rht_head_hashfn(ht, tbl, obj, params);
+	lock = rht_bucket_lock(tbl, hash);
+
+	spin_lock_bh(lock);
+
+	/* Because we have already taken the bucket lock in tbl,
+	 * if we find that future_tbl is not yet visible then
+	 * that guarantees all other insertions of the same entry
+	 * will also grab the bucket lock in tbl because until
+	 * the rehash completes ht->tbl won't be changed.
+	 */
+	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (unlikely(new_tbl)) {
+		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+		goto out;
+	}
+
+	if (!key)
+		goto skip_lookup;
+
+	rht_for_each(head, tbl, hash) {
+		if (unlikely(!(params.obj_cmpfn ?
+			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+			       rhashtable_compare(&arg, rht_obj(ht, head)))))
+			goto out;
+	}
+
+skip_lookup:
+	err = 0;
+
+	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+	RCU_INIT_POINTER(obj->next, head);
+
+	rcu_assign_pointer(tbl->buckets[hash], obj);
+
+	atomic_inc(&ht->nelems);
+	if (rht_grow_above_75(ht, tbl))
+		schedule_work(&ht->run_work);
+
+out:
+	spin_unlock_bh(lock);
+	rcu_read_unlock();
+
+	return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ * @params:	hash table parameters
+ *
+ * Will take a per-bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhashtable_insert_fast(
+	struct rhashtable *ht, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
+	return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ * @params:	hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for fixed-key hash tables (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhashtable_lookup_insert_fast(
+	struct rhashtable *ht, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
+	const char *key = rht_obj(ht, obj);
+
+	BUG_ON(ht->p.obj_hashfn);
+
+	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+					params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ *				  with explicit key
+ * @ht:		hash table
+ * @key:	key
+ * @obj:	pointer to hash head inside object
+ * @params:	hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+	struct rhashtable *ht, const void *key, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
+	BUG_ON(!ht->p.obj_hashfn || !key);
+
+	return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+static inline int __rhashtable_remove_fast(
+	struct rhashtable *ht, struct bucket_table *tbl,
+	struct rhash_head *obj, const struct rhashtable_params params)
+{
+	struct rhash_head __rcu **pprev;
+	struct rhash_head *he;
+	spinlock_t *lock;
+	unsigned hash;
+	int err = -ENOENT;
+
+	hash = rht_head_hashfn(ht, tbl, obj, params);
+	lock = rht_bucket_lock(tbl, hash);
+
+	spin_lock_bh(lock);
+
+	pprev = &tbl->buckets[hash];
+	rht_for_each(he, tbl, hash) {
+		if (he != obj) {
+			pprev = &he->next;
+			continue;
+		}
+
+		rcu_assign_pointer(*pprev, obj->next);
+		err = 0;
+		break;
+	}
+
+	spin_unlock_bh(lock);
+
+	return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ * @params:	hash table parameters
+ *
+ * Since the hash chain is single-linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+	struct rhashtable *ht, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
+	struct bucket_table *tbl;
+	int err;
+
+	rcu_read_lock();
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+
+	/* Because we have already taken (and released) the bucket
+	 * lock in old_tbl, if we find that future_tbl is not yet
+	 * visible then that guarantees the entry to still be in
+	 * the old tbl if it exists.
+	 */
+	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+		;
+
+	if (err)
+		goto out;
+
+	atomic_dec(&ht->nelems);
+	if (rht_shrink_below_30(ht, tbl))
+		schedule_work(&ht->run_work);
+
+out:
+	rcu_read_unlock();
+
+	return err;
+}
+
 #endif	/* _LINUX_RHASHTABLE_H */
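Continuing the hypothetical my_obj/my_params sketch from after the commit message, insertion and removal through the new inlined entry points might look like this (illustrative only, not part of the patch):

static int my_add(struct rhashtable *ht, struct my_obj *obj)
{
	/* Fails with -EEXIST on a duplicate key; a deferred resize is
	 * scheduled automatically once the 75% watermark is crossed.
	 */
	return rhashtable_lookup_insert_fast(ht, &obj->node, my_params);
}

static int my_del(struct rhashtable *ht, struct my_obj *obj)
{
	/* Walks the old and any future table during a rehash and
	 * returns -ENOENT if the object cannot be found.
	 */
	return rhashtable_remove_fast(ht, &obj->node, my_params);
}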
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e0a9d59f80d6..d1d23fb58525 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,13 +1,13 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
  * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -30,53 +30,11 @@
 #define HASH_MIN_SIZE		4U
 #define BUCKET_LOCKS_PER_CPU	128UL
 
-/* Base bits plus 1 bit for nulls marker */
-#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
-
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
-{
-	return &tbl->locks[hash & tbl->locks_mask];
-}
-
-static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
-{
-	return (void *) he - ht->p.head_offset;
-}
-
-static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
-{
-	return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1);
-}
-
-static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl,
-		      const void *key)
-{
-	return rht_bucket_index(tbl, ht->p.hashfn(key, ht->p.key_len,
-						  tbl->hash_rnd));
-}
-
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
 		       const struct rhash_head *he)
 {
-	const char *ptr = rht_obj(ht, he);
-
-	return likely(ht->p.key_len) ?
-	       key_hashfn(ht, tbl, ptr + ht->p.key_offset) :
-	       rht_bucket_index(tbl, ht->p.obj_hashfn(ptr, tbl->hash_rnd));
+	return rht_head_hashfn(ht, tbl, he, ht->p);
 }
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -90,7 +48,7 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 {
-	spinlock_t *lock = bucket_lock(tbl, hash);
+	spinlock_t *lock = rht_bucket_lock(tbl, hash);
 
 	return (debug_locks) ? lockdep_is_held(lock) : 1;
 }
@@ -178,32 +136,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	return tbl;
 }
 
-/**
- * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
- * @ht:		hash table
- * @tbl:	current table
- */
-static bool rht_grow_above_75(const struct rhashtable *ht,
-			      const struct bucket_table *tbl)
-{
-	/* Expand table when exceeding 75% load */
-	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
-	       (!ht->p.max_size || tbl->size < ht->p.max_size);
-}
-
-/**
- * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
- * @ht:		hash table
- * @tbl:	current table
- */
-static bool rht_shrink_below_30(const struct rhashtable *ht,
-				const struct bucket_table *tbl)
-{
-	/* Shrink table beneath 30% load */
-	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
-	       tbl->size > ht->p.min_size;
-}
-
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -230,7 +162,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 
 	new_hash = head_hashfn(ht, new_tbl, entry);
 
-	new_bucket_lock = bucket_lock(new_tbl, new_hash);
+	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
 
 	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
 	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
@@ -255,7 +187,7 @@ static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	spinlock_t *old_bucket_lock;
 
-	old_bucket_lock = bucket_lock(old_tbl, old_hash);
+	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
 
 	spin_lock_bh(old_bucket_lock);
 	while (!rhashtable_rehash_one(ht, old_hash))
@@ -376,6 +308,37 @@ unlock:
 	mutex_unlock(&ht->mutex);
 }
 
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+			   struct rhash_head *obj,
+			   struct bucket_table *tbl)
+{
+	struct rhash_head *head;
+	unsigned hash;
+	int err = -EEXIST;
+
+	hash = head_hashfn(ht, tbl, obj);
+	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
+
+	if (key && rhashtable_lookup_fast(ht, key, ht->p))
+		goto exit;
+
+	err = 0;
+
+	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+	RCU_INIT_POINTER(obj->next, head);
+
+	rcu_assign_pointer(tbl->buckets[hash], obj);
+
+	atomic_inc(&ht->nelems);
+
+exit:
+	spin_unlock(rht_bucket_lock(tbl, hash));
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
+
 static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 				bool (*compare)(void *, void *), void *arg)
 {
@@ -390,7 +353,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
 	hash = head_hashfn(ht, old_tbl, obj);
-	old_lock = bucket_lock(old_tbl, hash);
+	old_lock = rht_bucket_lock(old_tbl, hash);
 
 	spin_lock_bh(old_lock);
 
@@ -403,7 +366,8 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
 	if (tbl != old_tbl) {
 		hash = head_hashfn(ht, tbl, obj);
-		spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
+		spin_lock_nested(rht_bucket_lock(tbl, hash),
+				 SINGLE_DEPTH_NESTING);
 	}
 
 	if (compare &&
@@ -430,7 +394,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 
 exit:
 	if (tbl != old_tbl)
-		spin_unlock(bucket_lock(tbl, hash));
+		spin_unlock(rht_bucket_lock(tbl, hash));
 
 	spin_unlock_bh(old_lock);
 
@@ -471,7 +435,7 @@ static bool __rhashtable_remove(struct rhashtable *ht,
 	bool ret = false;
 
 	hash = head_hashfn(ht, tbl, obj);
-	lock = bucket_lock(tbl, hash);
+	lock = rht_bucket_lock(tbl, hash);
 
 	spin_lock_bh(lock);
 
@@ -537,19 +501,6 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove);
 
-struct rhashtable_compare_arg {
-	struct rhashtable *ht;
-	const void *key;
-};
-
-static bool rhashtable_compare(void *ptr, void *arg)
-{
-	struct rhashtable_compare_arg *x = arg;
-	struct rhashtable *ht = x->ht;
-
-	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
-}
-
 /**
  * rhashtable_lookup - lookup key in hash table
  * @ht:		hash table
@@ -565,14 +516,7 @@ static bool rhashtable_compare(void *ptr, void *arg)
  */
 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
 {
-	struct rhashtable_compare_arg arg = {
-		.ht = ht,
-		.key = key,
-	};
-
-	BUG_ON(!ht->p.key_len);
-
-	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
+	return rhashtable_lookup_fast(ht, key, ht->p);
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup);
 
@@ -591,7 +535,8 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup);
  * Returns the first entry on which the compare function returned true.
  */
 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
-				bool (*compare)(void *, void *), void *arg)
+				bool (*compare)(void *, void *),
+				void *arg)
 {
 	const struct bucket_table *tbl;
 	struct rhash_head *he;
@@ -601,7 +546,7 @@ void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
 
 	tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
-	hash = key_hashfn(ht, tbl, key);
+	hash = rht_key_hashfn(ht, tbl, key, ht->p);
 	rht_for_each_rcu(he, tbl, hash) {
 		if (!compare(rht_obj(ht, he), arg))
 			continue;
@@ -643,15 +588,7 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
  */
 bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
-	struct rhashtable_compare_arg arg = {
-		.ht = ht,
-		.key = rht_obj(ht, obj) + ht->p.key_offset,
-	};
-
-	BUG_ON(!ht->p.key_len);
-
-	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
-						&arg);
+	return rhashtable_lookup_insert_fast(ht, obj, ht->p);
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
 
@@ -927,8 +864,8 @@ int rhashtable_init(struct rhashtable *ht,
 
 	size = HASH_DEFAULT_SIZE;
 
-	if ((params->key_len && !params->hashfn) ||
-	    (!params->key_len && !params->obj_hashfn))
+	if ((!(params->key_len && params->hashfn) && !params->obj_hashfn) ||
+	    (params->obj_hashfn && !params->obj_cmpfn))
 		return -EINVAL;
 
 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
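My reading of the relaxed validation above (an illustrative summary, not patch text):

/* Accepted parameter combinations after this change:
 *   key_len + hashfn               -> valid (fixed, inline key)
 *   obj_hashfn + obj_cmpfn         -> valid (scattered key)
 *   obj_hashfn without obj_cmpfn   -> -EINVAL
 *   neither key hashing nor object hashing configured -> -EINVAL
 */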