diff options
author | Thomas Graf <tgraf@suug.ch> | 2015-01-02 17:00:21 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-01-03 14:32:57 -0500 |
commit | f89bd6f87a53ce5a7d60662429591ebac2745c10 (patch) | |
tree | 69dafc1508219d2f3ee44647734db7c046f0b0f7 /lib/rhashtable.c | |
parent | 97defe1ecf868b8127f8e62395499d6a06e4c4b1 (diff) |
rhashtable: Support for nulls marker
In order to allow for wider usage of rhashtable, use a special nulls
marker to terminate each chain. The reason for not using the existing
nulls_list is that the prev pointer usage would not be valid as entries
can be linked in two different buckets at the same time.
The 4 nulls base bits can be set through the rhashtable_params structure
like this:
struct rhashtable_params params = {
[...]
.nulls_base = (1U << RHT_BASE_SHIFT),
};
This reduces the hash length from 32 bits to 27 bits.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r-- | lib/rhashtable.c | 37 |
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 312e3437c7bc..cbad192d3b3d 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -28,6 +28,9 @@ | |||
28 | #define HASH_MIN_SIZE 4UL | 28 | #define HASH_MIN_SIZE 4UL |
29 | #define BUCKET_LOCKS_PER_CPU 128UL | 29 | #define BUCKET_LOCKS_PER_CPU 128UL |
30 | 30 | ||
31 | /* Base bits plus 1 bit for nulls marker */ | ||
32 | #define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) | ||
33 | |||
31 | enum { | 34 | enum { |
32 | RHT_LOCK_NORMAL, | 35 | RHT_LOCK_NORMAL, |
33 | RHT_LOCK_NESTED, | 36 | RHT_LOCK_NESTED, |
@@ -86,7 +89,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) | |||
86 | hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, | 89 | hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, |
87 | ht->p.hash_rnd); | 90 | ht->p.hash_rnd); |
88 | 91 | ||
89 | return hash; | 92 | return hash >> HASH_RESERVED_SPACE; |
90 | } | 93 | } |
91 | 94 | ||
92 | static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) | 95 | static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) |
@@ -95,6 +98,7 @@ static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) | |||
95 | u32 hash; | 98 | u32 hash; |
96 | 99 | ||
97 | hash = ht->p.hashfn(key, len, ht->p.hash_rnd); | 100 | hash = ht->p.hashfn(key, len, ht->p.hash_rnd); |
101 | hash >>= HASH_RESERVED_SPACE; | ||
98 | 102 | ||
99 | return rht_bucket_index(tbl, hash); | 103 | return rht_bucket_index(tbl, hash); |
100 | } | 104 | } |
@@ -111,7 +115,7 @@ static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) | |||
111 | struct rhash_head __rcu **pprev; | 115 | struct rhash_head __rcu **pprev; |
112 | 116 | ||
113 | for (pprev = &tbl->buckets[n]; | 117 | for (pprev = &tbl->buckets[n]; |
114 | rht_dereference_bucket(*pprev, tbl, n); | 118 | !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); |
115 | pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) | 119 | pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) |
116 | ; | 120 | ; |
117 | 121 | ||
@@ -164,6 +168,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
164 | { | 168 | { |
165 | struct bucket_table *tbl; | 169 | struct bucket_table *tbl; |
166 | size_t size; | 170 | size_t size; |
171 | int i; | ||
167 | 172 | ||
168 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | 173 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); |
169 | tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); | 174 | tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); |
@@ -180,6 +185,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
180 | return NULL; | 185 | return NULL; |
181 | } | 186 | } |
182 | 187 | ||
188 | for (i = 0; i < nbuckets; i++) | ||
189 | INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); | ||
190 | |||
183 | return tbl; | 191 | return tbl; |
184 | } | 192 | } |
185 | 193 | ||
@@ -221,7 +229,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, | |||
221 | /* Old bucket empty, no work needed. */ | 229 | /* Old bucket empty, no work needed. */ |
222 | p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, | 230 | p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, |
223 | old_hash); | 231 | old_hash); |
224 | if (!p) | 232 | if (rht_is_a_nulls(p)) |
225 | return; | 233 | return; |
226 | 234 | ||
227 | new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); | 235 | new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); |
@@ -252,8 +260,8 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, | |||
252 | /* Find the subsequent node which does hash to the same | 260 | /* Find the subsequent node which does hash to the same |
253 | * bucket as node P, or NULL if no such node exists. | 261 | * bucket as node P, or NULL if no such node exists. |
254 | */ | 262 | */ |
255 | next = NULL; | 263 | INIT_RHT_NULLS_HEAD(next, ht, old_hash); |
256 | if (he) { | 264 | if (!rht_is_a_nulls(he)) { |
257 | rht_for_each_continue(he, he->next, old_tbl, old_hash) { | 265 | rht_for_each_continue(he, he->next, old_tbl, old_hash) { |
258 | if (head_hashfn(ht, new_tbl, he) == new_hash) { | 266 | if (head_hashfn(ht, new_tbl, he) == new_hash) { |
259 | next = he; | 267 | next = he; |
@@ -369,11 +377,15 @@ int rhashtable_expand(struct rhashtable *ht) | |||
369 | */ | 377 | */ |
370 | complete = true; | 378 | complete = true; |
371 | for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { | 379 | for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { |
380 | struct rhash_head *head; | ||
381 | |||
372 | old_bucket_lock = bucket_lock(old_tbl, old_hash); | 382 | old_bucket_lock = bucket_lock(old_tbl, old_hash); |
373 | spin_lock_bh(old_bucket_lock); | 383 | spin_lock_bh(old_bucket_lock); |
374 | 384 | ||
375 | hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); | 385 | hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); |
376 | if (old_tbl->buckets[old_hash] != NULL) | 386 | head = rht_dereference_bucket(old_tbl->buckets[old_hash], |
387 | old_tbl, old_hash); | ||
388 | if (!rht_is_a_nulls(head)) | ||
377 | complete = false; | 389 | complete = false; |
378 | 390 | ||
379 | spin_unlock_bh(old_bucket_lock); | 391 | spin_unlock_bh(old_bucket_lock); |
@@ -498,6 +510,7 @@ static void rht_deferred_worker(struct work_struct *work) | |||
498 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) | 510 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) |
499 | { | 511 | { |
500 | struct bucket_table *tbl; | 512 | struct bucket_table *tbl; |
513 | struct rhash_head *head; | ||
501 | spinlock_t *lock; | 514 | spinlock_t *lock; |
502 | unsigned hash; | 515 | unsigned hash; |
503 | 516 | ||
@@ -508,7 +521,12 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) | |||
508 | lock = bucket_lock(tbl, hash); | 521 | lock = bucket_lock(tbl, hash); |
509 | 522 | ||
510 | spin_lock_bh(lock); | 523 | spin_lock_bh(lock); |
511 | RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); | 524 | head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); |
525 | if (rht_is_a_nulls(head)) | ||
526 | INIT_RHT_NULLS_HEAD(obj->next, ht, hash); | ||
527 | else | ||
528 | RCU_INIT_POINTER(obj->next, head); | ||
529 | |||
512 | rcu_assign_pointer(tbl->buckets[hash], obj); | 530 | rcu_assign_pointer(tbl->buckets[hash], obj); |
513 | spin_unlock_bh(lock); | 531 | spin_unlock_bh(lock); |
514 | 532 | ||
@@ -709,6 +727,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) | |||
709 | * .key_offset = offsetof(struct test_obj, key), | 727 | * .key_offset = offsetof(struct test_obj, key), |
710 | * .key_len = sizeof(int), | 728 | * .key_len = sizeof(int), |
711 | * .hashfn = jhash, | 729 | * .hashfn = jhash, |
730 | * .nulls_base = (1U << RHT_BASE_SHIFT), | ||
712 | * }; | 731 | * }; |
713 | * | 732 | * |
714 | * Configuration Example 2: Variable length keys | 733 | * Configuration Example 2: Variable length keys |
@@ -741,6 +760,9 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) | |||
741 | (!params->key_len && !params->obj_hashfn)) | 760 | (!params->key_len && !params->obj_hashfn)) |
742 | return -EINVAL; | 761 | return -EINVAL; |
743 | 762 | ||
763 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) | ||
764 | return -EINVAL; | ||
765 | |||
744 | params->min_shift = max_t(size_t, params->min_shift, | 766 | params->min_shift = max_t(size_t, params->min_shift, |
745 | ilog2(HASH_MIN_SIZE)); | 767 | ilog2(HASH_MIN_SIZE)); |
746 | 768 | ||
@@ -974,6 +996,7 @@ static int __init test_rht_init(void) | |||
974 | .key_offset = offsetof(struct test_obj, value), | 996 | .key_offset = offsetof(struct test_obj, value), |
975 | .key_len = sizeof(int), | 997 | .key_len = sizeof(int), |
976 | .hashfn = jhash, | 998 | .hashfn = jhash, |
999 | .nulls_base = (3U << RHT_BASE_SHIFT), | ||
977 | .grow_decision = rht_grow_above_75, | 1000 | .grow_decision = rht_grow_above_75, |
978 | .shrink_decision = rht_shrink_below_30, | 1001 | .shrink_decision = rht_shrink_below_30, |
979 | }; | 1002 | }; |