author    Dmitry Torokhov <dmitry.torokhov@gmail.com>	2015-02-10 14:35:36 -0500
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>	2015-02-10 14:35:36 -0500
commit    4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree      a20c125b27740ec7b4c761b11d801108e1b316b2 /lib/rhashtable.c
parent    47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent    98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	114
1 file changed, 66 insertions(+), 48 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 16d02639d334..6c3c723e902b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
 
@@ -32,7 +32,7 @@
 #ifdef CONFIG_PROVE_LOCKING
 int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
 {
-	return ht->p.mutex_is_held();
+	return ht->p.mutex_is_held(ht->p.parent);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 #endif
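Note the new callback signature above: the lockdep hook now receives the table's parent pointer (ht->p.parent), so one callback can serve several table instances. A minimal sketch of a conforming callback, assuming a caller-owned lock named my_hashtable_mutex (illustrative, not part of this patch):

#ifdef CONFIG_PROVE_LOCKING
static DEFINE_MUTEX(my_hashtable_mutex);

/* "parent" carries whatever the caller stored in its params; this
 * sketch ignores it and validates one global mutex instead.
 */
static int my_mutex_is_held(void *parent)
{
	return lockdep_is_held(&my_hashtable_mutex);
}
#endif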
@@ -54,7 +54,7 @@ static u32 __hashfn(const struct rhashtable *ht, const void *key,
 
 /**
  * rhashtable_hashfn - compute hash for key of given length
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
  * @key: pointer to key
  * @len: length of key
  *
@@ -85,7 +85,7 @@ static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
 
 /**
  * rhashtable_obj_hashfn - compute hash for hashed object
- * @ht: hash table to compuate for
+ * @ht: hash table to compute for
  * @ptr: pointer to hashed object
  *
  * Computes the hash value using the hash function `hashfn` respectively
@@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht,
 	return obj_hashfn(ht, rht_obj(ht, he), hsize);
 }
 
-static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 {
 	struct bucket_table *tbl;
 	size_t size;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, flags);
+	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
 
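bucket_table_alloc() now fixes its own allocation policy rather than taking a gfp_t from callers: kzalloc() is tried first with __GFP_NOWARN (so a failed large, physically contiguous allocation stays silent) and vzalloc() is the fallback. The same pattern in isolation, as a sketch (table_zalloc is a made-up name):

static void *table_zalloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (p == NULL)
		p = vzalloc(size);	/* virtually contiguous fallback */
	return p;
}

The matching free must distinguish the two paths, e.g. via is_vmalloc_addr() or kvfree().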
@@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 /**
  * rhashtable_expand - Expand hash table while allowing concurrent lookups
  * @ht: the hash table to expand
- * @flags: allocation flags
  *
  * A secondary bucket array is allocated and the hash entries are migrated
  * while keeping them on both lists until the end of the RCU grace period.
@@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
  */
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+int rhashtable_expand(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head *he;
@@ -223,14 +222,14 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
 	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
 		return 0;
 
-	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+	new_tbl = bucket_table_alloc(old_tbl->size * 2);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
 	ht->shift++;
 
 	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
@@ -248,8 +247,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
 	}
 
 	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
 	rcu_assign_pointer(ht->tbl, new_tbl);
 
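The publish step relies on the usual RCU pointer-publication guarantee: rcu_assign_pointer() orders initialization of the new table before the pointer store, so RCU readers observe either the fully valid old table or the fully valid new one. A generic sketch of the pattern (global_tbl and both helpers are illustrative):

static struct bucket_table __rcu *global_tbl;

static void publish(struct bucket_table *new_tbl)
{
	/* all prior writes to new_tbl are visible before the store */
	rcu_assign_pointer(global_tbl, new_tbl);
}

static size_t read_size(void)
{
	size_t size;

	rcu_read_lock();
	size = rcu_dereference(global_tbl)->size;	/* old or new */
	rcu_read_unlock();
	return size;
}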
@@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 /**
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
  * @ht: the hash table to shrink
- * @flags: allocation flags
  *
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
  */
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -297,23 +295,23 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	if (tbl->size <= HASH_MIN_SIZE)
+	if (ht->shift <= ht->p.min_shift)
 		return 0;
 
-	ntbl = bucket_table_alloc(tbl->size / 2, flags);
+	ntbl = bucket_table_alloc(tbl->size / 2);
 	if (ntbl == NULL)
 		return -ENOMEM;
 
 	ht->shift--;
 
 	/* Link each bucket in the new table to the first bucket
	 * in the old table that contains entries which will hash
	 * to the new bucket.
	 */
 	for (i = 0; i < ntbl->size; i++) {
 		ntbl->buckets[i] = tbl->buckets[i];
 
 		/* Link each bucket in the new table to the first bucket
		 * in the old table that contains entries which will hash
		 * to the new bucket.
		 */
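Each new bucket absorbs two old chains because bucket indices come from a power-of-two mask, and halving the table drops the mask's top bit. Illustrative arithmetic (sizes chosen for the example):

/*
 *   old size 8: index = hash & 7
 *   new size 4: index = hash & 3
 * Entries in old buckets 1 (0b001) and 5 (0b101) both satisfy
 * (hash & 3) == 1, so both chains must become reachable from new
 * bucket 1.
 */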
@@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
  * rhashtable_insert - insert object into hash hash table
  * @ht: hash table
  * @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
  *
  * Will automatically grow the table via rhashtable_expand() if the the
  * grow_decision function specified at rhashtable_init() returns true.
@@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
  * The caller must ensure that no concurrent table mutations occur. It is
  * however valid to have concurrent lookups if they are RCU protected.
  */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	u32 hash;
@@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	ht->nelems++;
 
 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
-		rhashtable_expand(ht, flags);
+		rhashtable_expand(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
 
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
  * @ht: hash table
  * @obj: pointer to hash head inside object
  * @pprev: pointer to previous element
- * @flags: allocation flags (table expansion)
  *
  * Identical to rhashtable_remove() but caller is alreayd aware of the element
  * in front of the element to be deleted. This is in particular useful for
  * deletion when combined with walking or lookup.
  */
 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-			     struct rhash_head __rcu **pprev, gfp_t flags)
+			     struct rhash_head __rcu **pprev)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 
@@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
 
 	if (ht->p.shrink_decision &&
 	    ht->p.shrink_decision(ht, tbl->size))
-		rhashtable_shrink(ht, flags);
+		rhashtable_shrink(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 
@@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  * rhashtable_remove - remove object from hash table
  * @ht: hash table
  * @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
  *
  * Since the hash chain is single linked, the removal operation needs to
  * walk the bucket chain upon removal. The removal operation is thus
@@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  * The caller must ensure that no concurrent table mutations occur. It is
  * however valid to have concurrent lookups if they are RCU protected.
  */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
 			continue;
 		}
 
-		rhashtable_remove_pprev(ht, he, pprev, flags);
+		rhashtable_remove_pprev(ht, he, pprev);
 		return true;
 	}
 
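Across the series, every mutation entry point (insert, remove, remove_pprev, expand, shrink) drops its gfp_t parameter; the allocation mode now lives inside bucket_table_alloc(). A sketch of the caller-side effect, reusing the test_obj type defined later in this file (requeue_obj is a made-up helper):

static void requeue_obj(struct rhashtable *ht, struct test_obj *obj)
{
	/* before: rhashtable_remove(ht, &obj->node, GFP_KERNEL); */
	rhashtable_remove(ht, &obj->node);
	obj->value++;				/* mutate the key ... */
	/* before: rhashtable_insert(ht, &obj->node, GFP_KERNEL); */
	rhashtable_insert(ht, &obj->node);	/* ... then re-hash */
}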
@@ -505,9 +498,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
-static size_t rounded_hashtable_size(unsigned int nelem)
+static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
-	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+		   1UL << params->min_shift);
 }
 
 /**
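rounded_hashtable_size() still sizes for a load factor below 75% (nelem * 4 / 3) but now floors the result at 1 << min_shift instead of the global HASH_MIN_SIZE. Worked example, values assumed:

/*
 *   nelem_hint = 100, min_shift = 4:
 *     100 * 4 / 3             = 133
 *     roundup_pow_of_two(133) = 256
 *     max(256, 1UL << 4)      = 256 buckets
 *   nelem_hint = 3, min_shift = 4 (the floor wins):
 *     roundup_pow_of_two(4)   = 4
 *     max(4, 1UL << 4)        = 16 buckets
 */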
@@ -530,8 +524,10 @@ static size_t rounded_hashtable_size(unsigned int nelem)
  *	.head_offset = offsetof(struct test_obj, node),
  *	.key_offset = offsetof(struct test_obj, key),
  *	.key_len = sizeof(int),
- *	.hashfn = arch_fast_hash,
+ *	.hashfn = jhash,
+ * #ifdef CONFIG_PROVE_LOCKING
  *	.mutex_is_held = &my_mutex_is_held,
+ * #endif
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -549,9 +545,11 @@ static size_t rounded_hashtable_size(unsigned int nelem)
  *
  * struct rhashtable_params params = {
  *	.head_offset = offsetof(struct test_obj, node),
- *	.hashfn = arch_fast_hash,
+ *	.hashfn = jhash,
  *	.obj_hashfn = my_hash_fn,
+ * #ifdef CONFIG_PROVE_LOCKING
  *	.mutex_is_held = &my_mutex_is_held,
+ * #endif
  * };
  */
 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
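Putting the post-patch pieces together, a minimal initialization along the lines of Configuration Example 1 might look as follows; this is a hedged sketch (my_obj, my_ht, my_init and the hint value are invented, my_mutex_is_held is the caller-supplied helper named in the examples above):

#include <linux/jhash.h>
#include <linux/rhashtable.h>

struct my_obj {
	int			key;
	struct rhash_head	node;
};

static struct rhashtable my_ht;

static int __init my_init(void)
{
	struct rhashtable_params params = {
		.nelem_hint	= 64,
		.head_offset	= offsetof(struct my_obj, node),
		.key_offset	= offsetof(struct my_obj, key),
		.key_len	= sizeof(int),
		.hashfn		= jhash,
#ifdef CONFIG_PROVE_LOCKING
		.mutex_is_held	= &my_mutex_is_held,
#endif
	};

	return rhashtable_init(&my_ht, &params);
}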
@@ -565,10 +563,13 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	    (!params->key_len && !params->obj_hashfn))
 		return -EINVAL;
 
+	params->min_shift = max_t(size_t, params->min_shift,
+				  ilog2(HASH_MIN_SIZE));
+
 	if (params->nelem_hint)
-		size = rounded_hashtable_size(params->nelem_hint);
+		size = rounded_hashtable_size(params);
 
-	tbl = bucket_table_alloc(size, GFP_KERNEL);
+	tbl = bucket_table_alloc(size);
 	if (tbl == NULL)
 		return -ENOMEM;
 
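The clamp guarantees min_shift never asks for fewer than HASH_MIN_SIZE buckets. Assuming HASH_MIN_SIZE is 4UL, as defined in this file at the time:

/*
 *   ilog2(4) = 2, so:
 *   min_shift = 0  ->  max_t(size_t, 0, 2) = 2   (floor: 4 buckets)
 *   min_shift = 5  ->  max_t(size_t, 5, 2) = 5   (floor: 32 buckets)
 * rhashtable_shrink() then refuses to shrink once
 * ht->shift <= ht->p.min_shift.
 */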
@@ -609,10 +610,12 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy);
 #define TEST_PTR	((void *) 0xdeadbeef)
 #define TEST_NEXPANDS	4
 
-static int test_mutex_is_held(void)
+#ifdef CONFIG_PROVE_LOCKING
+static int test_mutex_is_held(void *parent)
 {
 	return 1;
 }
+#endif
 
 struct test_obj {
 	void *ptr;
@@ -650,15 +653,15 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 	return 0;
 }
 
-static void test_bucket_stats(struct rhashtable *ht,
-			      struct bucket_table *tbl,
-			      bool quiet)
+static void test_bucket_stats(struct rhashtable *ht, bool quiet)
 {
-	unsigned int cnt, i, total = 0;
+	unsigned int cnt, rcu_cnt, i, total = 0;
 	struct test_obj *obj;
+	struct bucket_table *tbl;
 
+	tbl = rht_dereference_rcu(ht->tbl, ht);
 	for (i = 0; i < tbl->size; i++) {
-		cnt = 0;
+		rcu_cnt = cnt = 0;
 
 		if (!quiet)
 			pr_info(" [%#4x/%zu]", i, tbl->size);
@@ -670,6 +673,13 @@ static void test_bucket_stats(struct rhashtable *ht,
 			pr_cont(" [%p],", obj);
 		}
 
+		rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
+			rcu_cnt++;
+
+		if (rcu_cnt != cnt)
+			pr_warn("Test failed: Chain count mismach %d != %d",
+				cnt, rcu_cnt);
+
 		if (!quiet)
 			pr_cont("\n [%#x] first element: %p, chain length: %u\n",
 				i, tbl->buckets[i], cnt);
@@ -677,6 +687,9 @@ static void test_bucket_stats(struct rhashtable *ht,
 
 	pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
 		total, ht->nelems, TEST_ENTRIES);
+
+	if (total != ht->nelems || total != TEST_ENTRIES)
+		pr_warn("Test failed: Total count mismatch ^^^");
 }
 
 static int __init test_rhashtable(struct rhashtable *ht)
@@ -703,18 +716,17 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj->ptr = TEST_PTR;
 		obj->value = i * 2;
 
-		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+		rhashtable_insert(ht, &obj->node);
 	}
 
 	rcu_read_lock();
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-	test_bucket_stats(ht, tbl, true);
+	test_bucket_stats(ht, true);
 	test_rht_lookup(ht);
 	rcu_read_unlock();
 
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info(" Table expansion iteration %u...\n", i);
-		rhashtable_expand(ht, GFP_KERNEL);
+		rhashtable_expand(ht);
 
 		rcu_read_lock();
 		pr_info(" Verifying lookups...\n");
@@ -724,7 +736,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info(" Table shrinkage iteration %u...\n", i);
-		rhashtable_shrink(ht, GFP_KERNEL);
+		rhashtable_shrink(ht);
 
 		rcu_read_lock();
 		pr_info(" Verifying lookups...\n");
@@ -732,6 +744,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		rcu_read_unlock();
 	}
 
+	rcu_read_lock();
+	test_bucket_stats(ht, true);
+	rcu_read_unlock();
+
 	pr_info(" Deleting %d keys\n", TEST_ENTRIES);
 	for (i = 0; i < TEST_ENTRIES; i++) {
 		u32 key = i * 2;
@@ -739,7 +755,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj = rhashtable_lookup(ht, &key);
 		BUG_ON(!obj);
 
-		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+		rhashtable_remove(ht, &obj->node);
 		kfree(obj);
 	}
 
@@ -762,8 +778,10 @@ static int __init test_rht_init(void)
 		.head_offset = offsetof(struct test_obj, node),
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
-		.hashfn = arch_fast_hash,
+		.hashfn = jhash,
+#ifdef CONFIG_PROVE_LOCKING
 		.mutex_is_held = &test_mutex_is_held,
+#endif
 		.grow_decision = rht_grow_above_75,
 		.shrink_decision = rht_shrink_below_30,
 	};