path: root/lib/rhashtable.c
author	Thomas Graf <tgraf@suug.ch>	2014-08-02 05:47:44 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-02 22:49:38 -0400
commit	7e1e77636e36075ebf118298855268468f1028e8 (patch)
tree	a885e78ba5cb99f939b45e0de86ace05f23515f7 /lib/rhashtable.c
parent	d39a9ffce7f14b494391da982b8cefa311dae0f6 (diff)
lib: Resizable, Scalable, Concurrent Hash Table
Generic implementation of a resizable, scalable, concurrent hash table based on [0]. The implementation supports both fixed size keys, specified via an offset and length, and arbitrary keys via custom hash and compare functions. Lookups are lockless and protected as RCU read side critical sections. Automatic growing/shrinking based on user configurable watermarks is available while allowing concurrent lookups to take place.

Objects to be hashed must include a struct rhash_head. The reason for not using the existing struct hlist_head is that expansion and shrinking will have two buckets point to a single entry, which would lead to obscure reverse chaining behaviour.

Code includes a boot selftest if CONFIG_TEST_RHASHTABLE is defined.

[0] https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Reviewed-by: Nikolay Aleksandrov <nikolay@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
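For illustration (not part of the patch itself): a minimal sketch of how a user of this API could set up a table with fixed length keys and exercise insert, lookup and removal. The rhashtable_* calls, watermark helpers and parameter fields are the ones added by this patch; struct my_obj, my_mutex_is_held() and my_example() are hypothetical names.

#include <linux/rhashtable.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object to be hashed; only struct rhash_head is required. */
struct my_obj {
	u32 key;
	struct rhash_head node;
};

/* Stand-in for a real lockdep check of the caller's update-side lock. */
static int my_mutex_is_held(void)
{
	return 1;
}

static int my_example(void)
{
	struct rhashtable_params params = {
		.head_offset	 = offsetof(struct my_obj, node),
		.key_offset	 = offsetof(struct my_obj, key),
		.key_len	 = sizeof(u32),
		.hashfn		 = arch_fast_hash,
		.mutex_is_held	 = &my_mutex_is_held,
		.grow_decision	 = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	struct rhashtable ht;
	struct my_obj *obj, *found;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = key;

	/* Mutations are serialized by the caller and may trigger a resize. */
	rhashtable_insert(&ht, &obj->node, GFP_KERNEL);

	/* Lookups only need rcu_read_lock() and may run concurrently. */
	rcu_read_lock();
	found = rhashtable_lookup(&ht, &key);
	rcu_read_unlock();

	rhashtable_remove(&ht, &obj->node, GFP_KERNEL);
	kfree(obj);
	rhashtable_destroy(&ht);

	return found ? 0 : -ENOENT;
}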
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	797
1 file changed, 797 insertions(+), 0 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000000..e6940cf16628
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,797 @@
1/*
2 * Resizable, Scalable, Concurrent Hash Table
3 *
4 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6 *
7 * Based on the following paper:
8 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9 *
10 * Code partially derived from nft_hash
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/log2.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/mm.h>
23#include <linux/hash.h>
24#include <linux/random.h>
25#include <linux/rhashtable.h>
26#include <linux/log2.h>
27
28#define HASH_DEFAULT_SIZE 64UL
29#define HASH_MIN_SIZE 4UL
30
31#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
32
33#ifdef CONFIG_PROVE_LOCKING
34int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
35{
36 return ht->p.mutex_is_held();
37}
38EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
39#endif
40
41/**
42 * rht_obj - cast hash head to outer object
43 * @ht: hash table
44 * @he: hashed node
45 */
46void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
47{
48 return (void *) he - ht->p.head_offset;
49}
50EXPORT_SYMBOL_GPL(rht_obj);
51
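/* Hash the key with the configured hash function and the per-table random
 * seed, then mask the result down to the current bucket count (hsize is
 * always a power of two).
 */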
52static u32 __hashfn(const struct rhashtable *ht, const void *key,
53 u32 len, u32 hsize)
54{
55 u32 h;
56
57 h = ht->p.hashfn(key, len, ht->p.hash_rnd);
58
59 return h & (hsize - 1);
60}
61
62/**
63 * rhashtable_hashfn - compute hash for key of given length
64 * @ht: hash table to compute for
65 * @key: pointer to key
66 * @len: length of key
67 *
68 * Computes the hash value using the hash function provided in the 'hashfn'
69 * member of struct rhashtable_params. The returned value is guaranteed to be
70 * smaller than the number of buckets in the hash table.
71 */
72u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
73{
74 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
75
76 return __hashfn(ht, key, len, tbl->size);
77}
78EXPORT_SYMBOL_GPL(rhashtable_hashfn);
79
80static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
81{
82 if (unlikely(!ht->p.key_len)) {
83 u32 h;
84
85 h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
86
87 return h & (hsize - 1);
88 }
89
90 return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
91}
92
93/**
94 * rhashtable_obj_hashfn - compute hash for hashed object
95 * @ht: hash table to compute for
96 * @ptr: pointer to hashed object
97 *
98 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
99 * on whether the hash table is set up to work with a fixed length key. The
100 * returned value is guaranteed to be smaller than the number of buckets
101 * in the hash table.
102 */
103u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
104{
105 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
106
107 return obj_hashfn(ht, ptr, tbl->size);
108}
109EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
110
111static u32 head_hashfn(const struct rhashtable *ht,
112 const struct rhash_head *he, u32 hsize)
113{
114 return obj_hashfn(ht, rht_obj(ht, he), hsize);
115}
116
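/* Try a kmalloc-based allocation first and fall back to vzalloc() for
 * larger tables; either way the bucket array is released with kvfree().
 */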
117static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
118{
119 struct bucket_table *tbl;
120 size_t size;
121
122 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
123 tbl = kzalloc(size, flags);
124 if (tbl == NULL)
125 tbl = vzalloc(size);
126
127 if (tbl == NULL)
128 return NULL;
129
130 tbl->size = nbuckets;
131
132 return tbl;
133}
134
135static void bucket_table_free(const struct bucket_table *tbl)
136{
137 kvfree(tbl);
138}
139
140/**
141 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
142 * @ht: hash table
143 * @new_size: new table size
144 */
145bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
146{
147 /* Expand table when exceeding 75% load */
148 return ht->nelems > (new_size / 4 * 3);
149}
150EXPORT_SYMBOL_GPL(rht_grow_above_75);
151
152/**
153 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
154 * @ht: hash table
155 * @new_size: new table size
156 */
157bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
158{
159 /* Shrink table beneath 30% load */
160 return ht->nelems < (new_size * 3 / 10);
161}
162EXPORT_SYMBOL_GPL(rht_shrink_below_30);
163
164static void hashtable_chain_unzip(const struct rhashtable *ht,
165 const struct bucket_table *new_tbl,
166 struct bucket_table *old_tbl, size_t n)
167{
168 struct rhash_head *he, *p, *next;
169 unsigned int h;
170
171 /* Old bucket empty, no work needed. */
172 p = rht_dereference(old_tbl->buckets[n], ht);
173 if (!p)
174 return;
175
176	/* Advance the old bucket pointer one or more times until it
177	 * reaches a node that doesn't hash to the same new bucket as the
178	 * previous node; call that previous node p.
179	 */
180 h = head_hashfn(ht, p, new_tbl->size);
181 rht_for_each(he, p->next, ht) {
182 if (head_hashfn(ht, he, new_tbl->size) != h)
183 break;
184 p = he;
185 }
186 RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
187
188 /* Find the subsequent node which does hash to the same
189 * bucket as node P, or NULL if no such node exists.
190 */
191 next = NULL;
192 if (he) {
193 rht_for_each(he, he->next, ht) {
194 if (head_hashfn(ht, he, new_tbl->size) == h) {
195 next = he;
196 break;
197 }
198 }
199 }
200
201 /* Set p's next pointer to that subsequent node pointer,
202 * bypassing the nodes which do not hash to p's bucket
203 */
204 RCU_INIT_POINTER(p->next, next);
205}
206
207/**
208 * rhashtable_expand - Expand hash table while allowing concurrent lookups
209 * @ht: the hash table to expand
210 * @flags: allocation flags
211 *
212 * A secondary bucket array is allocated and the hash entries are migrated
213 * while keeping them on both lists until the end of the RCU grace period.
214 *
215 * This function may only be called in a context where it is safe to call
216 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
217 *
218 * The caller must ensure that no concurrent table mutations take place.
219 * It is however valid to have concurrent lookups if they are RCU protected.
220 */
221int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
222{
223 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
224 struct rhash_head *he;
225 unsigned int i, h;
226 bool complete;
227
228 ASSERT_RHT_MUTEX(ht);
229
230 if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
231 return 0;
232
233 new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
234 if (new_tbl == NULL)
235 return -ENOMEM;
236
237 ht->shift++;
238
239 /* For each new bucket, search the corresponding old bucket
240 * for the first entry that hashes to the new bucket, and
241 * link the new bucket to that entry. Since all the entries
242 * which will end up in the new bucket appear in the same
243 * old bucket, this constructs an entirely valid new hash
244 * table, but with multiple buckets "zipped" together into a
245 * single imprecise chain.
246 */
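	/* Illustration: growing 2 -> 4 buckets, old bucket 0 holding the
	 * chain A(new hash 0) -> B(new hash 2) -> C(new hash 0) yields
	 * new bucket 0 -> A -> B -> C and new bucket 2 -> B -> C; the
	 * foreign entries are pruned by the unzip passes below.
	 */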
247 for (i = 0; i < new_tbl->size; i++) {
248 h = i & (old_tbl->size - 1);
249 rht_for_each(he, old_tbl->buckets[h], ht) {
250 if (head_hashfn(ht, he, new_tbl->size) == i) {
251 RCU_INIT_POINTER(new_tbl->buckets[i], he);
252 break;
253 }
254 }
255 }
256
257 /* Publish the new table pointer. Lookups may now traverse
258 * the new table, but they will not benefit from any
259 * additional efficiency until later steps unzip the buckets.
260 */
261 rcu_assign_pointer(ht->tbl, new_tbl);
262
263 /* Unzip interleaved hash chains */
264 do {
265 /* Wait for readers. All new readers will see the new
266 * table, and thus no references to the old table will
267 * remain.
268 */
269 synchronize_rcu();
270
271 /* For each bucket in the old table (each of which
272 * contains items from multiple buckets of the new
273 * table): ...
274 */
275 complete = true;
276 for (i = 0; i < old_tbl->size; i++) {
277 hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
278 if (old_tbl->buckets[i] != NULL)
279 complete = false;
280 }
281 } while (!complete);
282
283 bucket_table_free(old_tbl);
284 return 0;
285}
286EXPORT_SYMBOL_GPL(rhashtable_expand);
287
288/**
289 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
290 * @ht: the hash table to shrink
291 * @flags: allocation flags
292 *
293 * This function may only be called in a context where it is safe to call
294 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
295 *
296 * The caller must ensure that no concurrent table mutations take place.
297 * It is however valid to have concurrent lookups if they are RCU protected.
298 */
299int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
300{
301 struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
302 struct rhash_head __rcu **pprev;
303 unsigned int i;
304
305 ASSERT_RHT_MUTEX(ht);
306
307 if (tbl->size <= HASH_MIN_SIZE)
308 return 0;
309
310 ntbl = bucket_table_alloc(tbl->size / 2, flags);
311 if (ntbl == NULL)
312 return -ENOMEM;
313
314 ht->shift--;
315
316 /* Link each bucket in the new table to the first bucket
317 * in the old table that contains entries which will hash
318 * to the new bucket.
319 */
320 for (i = 0; i < ntbl->size; i++) {
321 ntbl->buckets[i] = tbl->buckets[i];
322
323		/* Walk to the end of that chain and append the second old
324		 * bucket (i + ntbl->size), whose entries also hash to this
325		 * new bucket.
326		 */
327 for (pprev = &ntbl->buckets[i]; *pprev != NULL;
328 pprev = &rht_dereference(*pprev, ht)->next)
329 ;
330 RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
331 }
332
333 /* Publish the new, valid hash table */
334 rcu_assign_pointer(ht->tbl, ntbl);
335
336 /* Wait for readers. No new readers will have references to the
337 * old hash table.
338 */
339 synchronize_rcu();
340
341 bucket_table_free(tbl);
342
343 return 0;
344}
345EXPORT_SYMBOL_GPL(rhashtable_shrink);
346
347/**
348 * rhashtable_insert - insert object into hash table
349 * @ht: hash table
350 * @obj: pointer to hash head inside object
351 * @flags: allocation flags (table expansion)
352 *
353 * Will automatically grow the table via rhashtable_expand() if the
354 * grow_decision function specified at rhashtable_init() returns true.
355 *
356 * The caller must ensure that no concurrent table mutations occur. It is
357 * however valid to have concurrent lookups if they are RCU protected.
358 */
359void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
360 gfp_t flags)
361{
362 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
363 u32 hash;
364
365 ASSERT_RHT_MUTEX(ht);
366
367 hash = head_hashfn(ht, obj, tbl->size);
368 RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
369 rcu_assign_pointer(tbl->buckets[hash], obj);
370 ht->nelems++;
371
372 if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
373 rhashtable_expand(ht, flags);
374}
375EXPORT_SYMBOL_GPL(rhashtable_insert);
376
377/**
378 * rhashtable_remove_pprev - remove object from hash table given previous element
379 * @ht: hash table
380 * @obj: pointer to hash head inside object
381 * @pprev: pointer to previous element
382 * @flags: allocation flags (table shrinking)
383 *
384 * Identical to rhashtable_remove() but the caller is already aware of the
385 * element in front of the element to be deleted. This is particularly useful for
386 * deletion when combined with walking or lookup.
387 */
388void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
389 struct rhash_head **pprev, gfp_t flags)
390{
391 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
392
393 ASSERT_RHT_MUTEX(ht);
394
395 RCU_INIT_POINTER(*pprev, obj->next);
396 ht->nelems--;
397
398 if (ht->p.shrink_decision &&
399 ht->p.shrink_decision(ht, tbl->size))
400 rhashtable_shrink(ht, flags);
401}
402EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
403
404/**
405 * rhashtable_remove - remove object from hash table
406 * @ht: hash table
407 * @obj: pointer to hash head inside object
408 * @flags: allocation flags (table shrinking)
409 *
410 * Since the hash chain is singly linked, the removal operation needs to
411 * walk the bucket chain upon removal. The removal operation is thus
412 * considerably slower if the hash table is not correctly sized.
413 *
414 * Will automatically shrink the table via rhashtable_shrink() if the
415 * shrink_decision function specified at rhashtable_init() returns true.
416 *
417 * The caller must ensure that no concurrent table mutations occur. It is
418 * however valid to have concurrent lookups if they are RCU protected.
419 */
420bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
421 gfp_t flags)
422{
423 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
424 struct rhash_head __rcu **pprev;
425 struct rhash_head *he;
426 u32 h;
427
428 ASSERT_RHT_MUTEX(ht);
429
430 h = head_hashfn(ht, obj, tbl->size);
431
432 pprev = &tbl->buckets[h];
433 rht_for_each(he, tbl->buckets[h], ht) {
434 if (he != obj) {
435 pprev = &he->next;
436 continue;
437 }
438
439 rhashtable_remove_pprev(ht, he, pprev, flags);
440 return true;
441 }
442
443 return false;
444}
445EXPORT_SYMBOL_GPL(rhashtable_remove);
446
447/**
448 * rhashtable_lookup - lookup key in hash table
449 * @ht: hash table
450 * @key: pointer to key
451 *
452 * Computes the hash value for the key and traverses the bucket chain looking
453 * for an entry with an identical key. The first matching entry is returned.
454 *
455 * This lookup function may only be used for fixed key hash tables (key_len
456 * parameter set). It will BUG() if used inappropriately.
457 *
458 * Lookups may occur in parallel with hash mutations as long as the lookup is
459 * guarded by rcu_read_lock(). The caller must take care of this.
460 */
461void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
462{
463 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
464 struct rhash_head *he;
465 u32 h;
466
467 BUG_ON(!ht->p.key_len);
468
469 h = __hashfn(ht, key, ht->p.key_len, tbl->size);
470 rht_for_each_rcu(he, tbl->buckets[h], ht) {
471 if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
472 ht->p.key_len))
473 continue;
474 return (void *) he - ht->p.head_offset;
475 }
476
477 return NULL;
478}
479EXPORT_SYMBOL_GPL(rhashtable_lookup);
480
481/**
482 * rhashtable_lookup_compare - search hash table with compare function
483 * @ht: hash table
484 * @hash: hash value of desired entry
485 * @compare: compare function, must return true on match
486 * @arg: argument passed on to compare function
487 *
488 * Traverses the bucket chain behind the provided hash value and calls the
489 * specified compare function for each entry.
490 *
491 * Lookups may occur in parallel with hash mutations as long as the lookup is
492 * guarded by rcu_read_lock(). The caller must take care of this.
493 *
494 * Returns the first entry on which the compare function returned true.
495 */
496void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
497 bool (*compare)(void *, void *), void *arg)
498{
499 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
500 struct rhash_head *he;
501
502 if (unlikely(hash >= tbl->size))
503 return NULL;
504
505 rht_for_each_rcu(he, tbl->buckets[hash], ht) {
506 if (!compare(rht_obj(ht, he), arg))
507 continue;
508 return (void *) he - ht->p.head_offset;
509 }
510
511 return NULL;
512}
513EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
514
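/* Size the initial table so that 'nelem' entries stay at or below the 75%
 * grow watermark: scale by 4/3, round up to a power of two, and never go
 * below HASH_MIN_SIZE.
 */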
515static size_t rounded_hashtable_size(unsigned int nelem)
516{
517 return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
518}
519
520/**
521 * rhashtable_init - initialize a new hash table
522 * @ht: hash table to be initialized
523 * @params: configuration parameters
524 *
525 * Initializes a new hash table based on the provided configuration
526 * parameters. A table can be configured either with a variable or
527 * fixed length key:
528 *
529 * Configuration Example 1: Fixed length keys
530 * struct test_obj {
531 * int key;
532 * void * my_member;
533 * struct rhash_head node;
534 * };
535 *
536 * struct rhashtable_params params = {
537 * .head_offset = offsetof(struct test_obj, node),
538 * .key_offset = offsetof(struct test_obj, key),
539 * .key_len = sizeof(int),
540 * .hashfn = arch_fast_hash,
541 * .mutex_is_held = &my_mutex_is_held,
542 * };
543 *
544 * Configuration Example 2: Variable length keys
545 * struct test_obj {
546 * [...]
547 * struct rhash_head node;
548 * };
549 *
550 * u32 my_hash_fn(const void *data, u32 seed)
551 * {
552 * struct test_obj *obj = data;
553 *
554 * return [... hash ...];
555 * }
556 *
557 * struct rhashtable_params params = {
558 * .head_offset = offsetof(struct test_obj, node),
559 * .hashfn = arch_fast_hash,
560 * .obj_hashfn = my_hash_fn,
561 * .mutex_is_held = &my_mutex_is_held,
562 * };
563 */
564int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
565{
566 struct bucket_table *tbl;
567 size_t size;
568
569 size = HASH_DEFAULT_SIZE;
570
571 if ((params->key_len && !params->hashfn) ||
572 (!params->key_len && !params->obj_hashfn))
573 return -EINVAL;
574
575 if (params->nelem_hint)
576 size = rounded_hashtable_size(params->nelem_hint);
577
578 tbl = bucket_table_alloc(size, GFP_KERNEL);
579 if (tbl == NULL)
580 return -ENOMEM;
581
582 memset(ht, 0, sizeof(*ht));
583 ht->shift = ilog2(tbl->size);
584 memcpy(&ht->p, params, sizeof(*params));
585 RCU_INIT_POINTER(ht->tbl, tbl);
586
587 if (!ht->p.hash_rnd)
588 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
589
590 return 0;
591}
592EXPORT_SYMBOL_GPL(rhashtable_init);
593
594/**
595 * rhashtable_destroy - destroy hash table
596 * @ht: the hash table to destroy
597 *
598 * Frees the bucket array.
599 */
600void rhashtable_destroy(const struct rhashtable *ht)
601{
602 const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
603
604 bucket_table_free(tbl);
605}
606EXPORT_SYMBOL_GPL(rhashtable_destroy);
607
608/**************************************************************************
609 * Self Test
610 **************************************************************************/
611
612#ifdef CONFIG_TEST_RHASHTABLE
613
614#define TEST_HT_SIZE 8
615#define TEST_ENTRIES 2048
616#define TEST_PTR ((void *) 0xdeadbeef)
617#define TEST_NEXPANDS 4
618
619static int test_mutex_is_held(void)
620{
621 return 1;
622}
623
624struct test_obj {
625 void *ptr;
626 int value;
627 struct rhash_head node;
628};
629
630static int __init test_rht_lookup(struct rhashtable *ht)
631{
632 unsigned int i;
633
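	/* Only even keys were inserted (obj->value = i * 2), so lookups
	 * for odd keys are expected to miss.
	 */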
634 for (i = 0; i < TEST_ENTRIES * 2; i++) {
635 struct test_obj *obj;
636 bool expected = !(i % 2);
637 u32 key = i;
638
639 obj = rhashtable_lookup(ht, &key);
640
641 if (expected && !obj) {
642 pr_warn("Test failed: Could not find key %u\n", key);
643 return -ENOENT;
644 } else if (!expected && obj) {
645 pr_warn("Test failed: Unexpected entry found for key %u\n",
646 key);
647 return -EEXIST;
648 } else if (expected && obj) {
649 if (obj->ptr != TEST_PTR || obj->value != i) {
650 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
651 obj->ptr, TEST_PTR, obj->value, i);
652 return -EINVAL;
653 }
654 }
655 }
656
657 return 0;
658}
659
660static void test_bucket_stats(struct rhashtable *ht,
661 struct bucket_table *tbl,
662 bool quiet)
663{
664 unsigned int cnt, i, total = 0;
665 struct test_obj *obj;
666
667 for (i = 0; i < tbl->size; i++) {
668 cnt = 0;
669
670 if (!quiet)
671 pr_info(" [%#4x/%zu]", i, tbl->size);
672
673 rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
674 cnt++;
675 total++;
676 if (!quiet)
677 pr_cont(" [%p],", obj);
678 }
679
680 if (!quiet)
681 pr_cont("\n [%#x] first element: %p, chain length: %u\n",
682 i, tbl->buckets[i], cnt);
683 }
684
685 pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
686 total, ht->nelems, TEST_ENTRIES);
687}
688
689static int __init test_rhashtable(struct rhashtable *ht)
690{
691 struct bucket_table *tbl;
692 struct test_obj *obj, *next;
693 int err;
694 unsigned int i;
695
696 /*
697 * Insertion Test:
698 * Insert TEST_ENTRIES into table with all keys even numbers
699 */
700 pr_info(" Adding %d keys\n", TEST_ENTRIES);
701 for (i = 0; i < TEST_ENTRIES; i++) {
702 struct test_obj *obj;
703
704 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
705 if (!obj) {
706 err = -ENOMEM;
707 goto error;
708 }
709
710 obj->ptr = TEST_PTR;
711 obj->value = i * 2;
712
713 rhashtable_insert(ht, &obj->node, GFP_KERNEL);
714 }
715
716 rcu_read_lock();
717 tbl = rht_dereference_rcu(ht->tbl, ht);
718 test_bucket_stats(ht, tbl, true);
719 test_rht_lookup(ht);
720 rcu_read_unlock();
721
722 for (i = 0; i < TEST_NEXPANDS; i++) {
723 pr_info(" Table expansion iteration %u...\n", i);
724 rhashtable_expand(ht, GFP_KERNEL);
725
726 rcu_read_lock();
727 pr_info(" Verifying lookups...\n");
728 test_rht_lookup(ht);
729 rcu_read_unlock();
730 }
731
732 for (i = 0; i < TEST_NEXPANDS; i++) {
733 pr_info(" Table shrinkage iteration %u...\n", i);
734 rhashtable_shrink(ht, GFP_KERNEL);
735
736 rcu_read_lock();
737 pr_info(" Verifying lookups...\n");
738 test_rht_lookup(ht);
739 rcu_read_unlock();
740 }
741
742 pr_info(" Deleting %d keys\n", TEST_ENTRIES);
743 for (i = 0; i < TEST_ENTRIES; i++) {
744 u32 key = i * 2;
745
746 obj = rhashtable_lookup(ht, &key);
747 BUG_ON(!obj);
748
749 rhashtable_remove(ht, &obj->node, GFP_KERNEL);
750 kfree(obj);
751 }
752
753 return 0;
754
755error:
756 tbl = rht_dereference_rcu(ht->tbl, ht);
757 for (i = 0; i < tbl->size; i++)
758 rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
759 kfree(obj);
760
761 return err;
762}
763
764static int __init test_rht_init(void)
765{
766 struct rhashtable ht;
767 struct rhashtable_params params = {
768 .nelem_hint = TEST_HT_SIZE,
769 .head_offset = offsetof(struct test_obj, node),
770 .key_offset = offsetof(struct test_obj, value),
771 .key_len = sizeof(int),
772 .hashfn = arch_fast_hash,
773 .mutex_is_held = &test_mutex_is_held,
774 .grow_decision = rht_grow_above_75,
775 .shrink_decision = rht_shrink_below_30,
776 };
777 int err;
778
779 pr_info("Running resizable hashtable tests...\n");
780
781 err = rhashtable_init(&ht, &params);
782 if (err < 0) {
783 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
784 err);
785 return err;
786 }
787
788 err = test_rhashtable(&ht);
789
790 rhashtable_destroy(&ht);
791
792 return err;
793}
794
795subsys_initcall(test_rht_init);
796
797#endif /* CONFIG_TEST_RHASHTABLE */