author     Thomas Graf <tgraf@suug.ch>            2014-11-13 07:45:46 -0500
committer  David S. Miller <davem@davemloft.net>  2014-11-13 15:18:40 -0500
commit     6eba82248ef47fd478f940a418429e3ec95cb3db (patch)
tree       77e82bd157c6b164347f02ba6c33e5abe4860f86 /lib/rhashtable.c
parent     64bb7e9949c03bff9463c40bfa740f611fb5500d (diff)
rhashtable: Drop gfp_flags arg in insert/remove functions
Reallocation is only required for shrinking and expanding, both of which rely
on a mutex for synchronization, and callers of rhashtable_init() are in
non-atomic context. There is therefore no reason to continue passing
allocation hints through the API.
Instead, use GFP_KERNEL and add __GFP_NOWARN | __GFP_NORETRY to allow for a
silent fallback to vzalloc() without the OOM killer jumping in, as pointed
out by Eric Dumazet and Eric W. Biederman.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
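For illustration, a caller-side sketch of the API change (modeled on the test_rhashtable() updates in the diff below; the example() helper is hypothetical and not part of this commit):

	#include <linux/rhashtable.h>

	/* Hypothetical caller, shown only to contrast the old and new signatures. */
	static void example(struct rhashtable *ht, struct rhash_head *node)
	{
		/* Before this patch, every mutation carried an allocation hint:
		 *   rhashtable_insert(ht, node, GFP_KERNEL);
		 *   rhashtable_remove(ht, node, GFP_KERNEL);
		 */

		/* After this patch the gfp_t argument is gone; expand/shrink
		 * allocate internally via bucket_table_alloc() and fall back
		 * to vzalloc() when kzalloc() fails.
		 */
		rhashtable_insert(ht, node);
		rhashtable_remove(ht, node);
	}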
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c | 41
1 file changed, 17 insertions, 24 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4b4b53bfa08b..25e4c213b08a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -107,13 +107,13 @@ static u32 head_hashfn(const struct rhashtable *ht,
 	return obj_hashfn(ht, rht_obj(ht, he), hsize);
 }
 
-static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+static struct bucket_table *bucket_table_alloc(size_t nbuckets)
 {
 	struct bucket_table *tbl;
 	size_t size;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, flags);
+	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
 
@@ -200,7 +200,6 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
 /**
  * rhashtable_expand - Expand hash table while allowing concurrent lookups
  * @ht: the hash table to expand
- * @flags: allocation flags
  *
  * A secondary bucket array is allocated and the hash entries are migrated
  * while keeping them on both lists until the end of the RCU grace period.
@@ -211,7 +210,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht,
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
  */
-int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+int rhashtable_expand(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head *he;
@@ -223,7 +222,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
 	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
 		return 0;
 
-	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+	new_tbl = bucket_table_alloc(old_tbl->size * 2);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -281,7 +280,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
 /**
  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
  * @ht: the hash table to shrink
- * @flags: allocation flags
  *
  * This function may only be called in a context where it is safe to call
  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
@@ -289,7 +287,7 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  * The caller must ensure that no concurrent table mutations take place.
  * It is however valid to have concurrent lookups if they are RCU protected.
  */
-int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -300,7 +298,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 	if (ht->shift <= ht->p.min_shift)
 		return 0;
 
-	ntbl = bucket_table_alloc(tbl->size / 2, flags);
+	ntbl = bucket_table_alloc(tbl->size / 2);
 	if (ntbl == NULL)
 		return -ENOMEM;
 
@@ -341,7 +339,6 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
  * rhashtable_insert - insert object into hash hash table
  * @ht: hash table
  * @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
  *
  * Will automatically grow the table via rhashtable_expand() if the the
  * grow_decision function specified at rhashtable_init() returns true.
@@ -349,8 +346,7 @@ EXPORT_SYMBOL_GPL(rhashtable_shrink);
  * The caller must ensure that no concurrent table mutations occur. It is
  * however valid to have concurrent lookups if they are RCU protected.
  */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	u32 hash;
@@ -363,7 +359,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	ht->nelems++;
 
 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
-		rhashtable_expand(ht, flags);
+		rhashtable_expand(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert);
 
@@ -372,14 +368,13 @@ EXPORT_SYMBOL_GPL(rhashtable_insert);
  * @ht: hash table
  * @obj: pointer to hash head inside object
  * @pprev: pointer to previous element
- * @flags: allocation flags (table expansion)
  *
  * Identical to rhashtable_remove() but caller is alreayd aware of the element
  * in front of the element to be deleted. This is in particular useful for
  * deletion when combined with walking or lookup.
  */
 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
-			     struct rhash_head __rcu **pprev, gfp_t flags)
+			     struct rhash_head __rcu **pprev)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 
@@ -390,7 +385,7 @@ void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
 
 	if (ht->p.shrink_decision &&
 	    ht->p.shrink_decision(ht, tbl->size))
-		rhashtable_shrink(ht, flags);
+		rhashtable_shrink(ht);
 }
 EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
 
@@ -398,7 +393,6 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  * rhashtable_remove - remove object from hash table
  * @ht: hash table
  * @obj: pointer to hash head inside object
- * @flags: allocation flags (table expansion)
  *
  * Since the hash chain is single linked, the removal operation needs to
  * walk the bucket chain upon removal. The removal operation is thus
@@ -410,8 +404,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
  * The caller must ensure that no concurrent table mutations occur. It is
  * however valid to have concurrent lookups if they are RCU protected.
  */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
-		       gfp_t flags)
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 {
 	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_head __rcu **pprev;
@@ -429,7 +422,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
 			continue;
 		}
 
-		rhashtable_remove_pprev(ht, he, pprev, flags);
+		rhashtable_remove_pprev(ht, he, pprev);
 		return true;
 	}
 
@@ -576,7 +569,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (params->nelem_hint)
 		size = rounded_hashtable_size(params);
 
-	tbl = bucket_table_alloc(size, GFP_KERNEL);
+	tbl = bucket_table_alloc(size);
 	if (tbl == NULL)
 		return -ENOMEM;
 
@@ -713,7 +706,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj->ptr = TEST_PTR;
 		obj->value = i * 2;
 
-		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+		rhashtable_insert(ht, &obj->node);
 	}
 
 	rcu_read_lock();
@@ -724,7 +717,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info(" Table expansion iteration %u...\n", i);
-		rhashtable_expand(ht, GFP_KERNEL);
+		rhashtable_expand(ht);
 
 		rcu_read_lock();
 		pr_info(" Verifying lookups...\n");
@@ -734,7 +727,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 
 	for (i = 0; i < TEST_NEXPANDS; i++) {
 		pr_info(" Table shrinkage iteration %u...\n", i);
-		rhashtable_shrink(ht, GFP_KERNEL);
+		rhashtable_shrink(ht);
 
 		rcu_read_lock();
 		pr_info(" Verifying lookups...\n");
@@ -749,7 +742,7 @@ static int __init test_rhashtable(struct rhashtable *ht)
 		obj = rhashtable_lookup(ht, &key);
 		BUG_ON(!obj);
 
-		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+		rhashtable_remove(ht, &obj->node);
 		kfree(obj);
 	}
 