diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
commit | 1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch) | |
tree | 44db563f64cf5f8d62af8f99a61e2b248c44ea3a /lib/rhashtable.c | |
parent | 03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff) | |
parent | f9eccf24615672896dc13251410c3f2f33a14f95 (diff) |
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer, which could lead to a system lockup when dealing with a
too-small delta (Roman Volkov)
- Select CLKSRC_MMIO when fsl_ftm_timer is enabled with COMPILE_TEST
(Daniel Lezcano)
- Prevent compiling timers that use the 'iomem' API when the architecture does
not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r-- | lib/rhashtable.c | 67 |
1 files changed, 40 insertions, 27 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a54ff8949f91..eb9240c458fa 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht, | |||
389 | return false; | 389 | return false; |
390 | } | 390 | } |
391 | 391 | ||
392 | int rhashtable_insert_rehash(struct rhashtable *ht) | 392 | int rhashtable_insert_rehash(struct rhashtable *ht, |
393 | struct bucket_table *tbl) | ||
393 | { | 394 | { |
394 | struct bucket_table *old_tbl; | 395 | struct bucket_table *old_tbl; |
395 | struct bucket_table *new_tbl; | 396 | struct bucket_table *new_tbl; |
396 | struct bucket_table *tbl; | ||
397 | unsigned int size; | 397 | unsigned int size; |
398 | int err; | 398 | int err; |
399 | 399 | ||
400 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | 400 | old_tbl = rht_dereference_rcu(ht->tbl, ht); |
401 | tbl = rhashtable_last_table(ht, old_tbl); | ||
402 | 401 | ||
403 | size = tbl->size; | 402 | size = tbl->size; |
404 | 403 | ||
404 | err = -EBUSY; | ||
405 | |||
405 | if (rht_grow_above_75(ht, tbl)) | 406 | if (rht_grow_above_75(ht, tbl)) |
406 | size *= 2; | 407 | size *= 2; |
407 | /* Do not schedule more than one rehash */ | 408 | /* Do not schedule more than one rehash */ |
408 | else if (old_tbl != tbl) | 409 | else if (old_tbl != tbl) |
409 | return -EBUSY; | 410 | goto fail; |
411 | |||
412 | err = -ENOMEM; | ||
410 | 413 | ||
411 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); | 414 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); |
412 | if (new_tbl == NULL) { | 415 | if (new_tbl == NULL) |
413 | /* Schedule async resize/rehash to try allocation | 416 | goto fail; |
414 | * non-atomic context. | ||
415 | */ | ||
416 | schedule_work(&ht->run_work); | ||
417 | return -ENOMEM; | ||
418 | } | ||
419 | 417 | ||
420 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); | 418 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); |
421 | if (err) { | 419 | if (err) { |
@@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht) | |||
426 | schedule_work(&ht->run_work); | 424 | schedule_work(&ht->run_work); |
427 | 425 | ||
428 | return err; | 426 | return err; |
427 | |||
428 | fail: | ||
429 | /* Do not fail the insert if someone else did a rehash. */ | ||
430 | if (likely(rcu_dereference_raw(tbl->future_tbl))) | ||
431 | return 0; | ||
432 | |||
433 | /* Schedule async rehash to retry allocation in process context. */ | ||
434 | if (err == -ENOMEM) | ||
435 | schedule_work(&ht->run_work); | ||
436 | |||
437 | return err; | ||
429 | } | 438 | } |
430 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); | 439 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); |
431 | 440 | ||
432 | int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | 441 | struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, |
433 | struct rhash_head *obj, | 442 | const void *key, |
434 | struct bucket_table *tbl) | 443 | struct rhash_head *obj, |
444 | struct bucket_table *tbl) | ||
435 | { | 445 | { |
436 | struct rhash_head *head; | 446 | struct rhash_head *head; |
437 | unsigned int hash; | 447 | unsigned int hash; |
@@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | |||
467 | exit: | 477 | exit: |
468 | spin_unlock(rht_bucket_lock(tbl, hash)); | 478 | spin_unlock(rht_bucket_lock(tbl, hash)); |
469 | 479 | ||
470 | return err; | 480 | if (err == 0) |
481 | return NULL; | ||
482 | else if (err == -EAGAIN) | ||
483 | return tbl; | ||
484 | else | ||
485 | return ERR_PTR(err); | ||
471 | } | 486 | } |
472 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); | 487 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); |
473 | 488 | ||
@@ -503,10 +518,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) | |||
503 | if (!iter->walker) | 518 | if (!iter->walker) |
504 | return -ENOMEM; | 519 | return -ENOMEM; |
505 | 520 | ||
506 | mutex_lock(&ht->mutex); | 521 | spin_lock(&ht->lock); |
507 | iter->walker->tbl = rht_dereference(ht->tbl, ht); | 522 | iter->walker->tbl = rht_dereference(ht->tbl, ht); |
508 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); | 523 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); |
509 | mutex_unlock(&ht->mutex); | 524 | spin_unlock(&ht->lock); |
510 | 525 | ||
511 | return 0; | 526 | return 0; |
512 | } | 527 | } |
@@ -520,10 +535,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); | |||
520 | */ | 535 | */ |
521 | void rhashtable_walk_exit(struct rhashtable_iter *iter) | 536 | void rhashtable_walk_exit(struct rhashtable_iter *iter) |
522 | { | 537 | { |
523 | mutex_lock(&iter->ht->mutex); | 538 | spin_lock(&iter->ht->lock); |
524 | if (iter->walker->tbl) | 539 | if (iter->walker->tbl) |
525 | list_del(&iter->walker->list); | 540 | list_del(&iter->walker->list); |
526 | mutex_unlock(&iter->ht->mutex); | 541 | spin_unlock(&iter->ht->lock); |
527 | kfree(iter->walker); | 542 | kfree(iter->walker); |
528 | } | 543 | } |
529 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | 544 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); |
@@ -547,14 +562,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) | |||
547 | { | 562 | { |
548 | struct rhashtable *ht = iter->ht; | 563 | struct rhashtable *ht = iter->ht; |
549 | 564 | ||
550 | mutex_lock(&ht->mutex); | 565 | rcu_read_lock(); |
551 | 566 | ||
567 | spin_lock(&ht->lock); | ||
552 | if (iter->walker->tbl) | 568 | if (iter->walker->tbl) |
553 | list_del(&iter->walker->list); | 569 | list_del(&iter->walker->list); |
554 | 570 | spin_unlock(&ht->lock); | |
555 | rcu_read_lock(); | ||
556 | |||
557 | mutex_unlock(&ht->mutex); | ||
558 | 571 | ||
559 | if (!iter->walker->tbl) { | 572 | if (!iter->walker->tbl) { |
560 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); | 573 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); |
@@ -723,9 +736,6 @@ int rhashtable_init(struct rhashtable *ht, | |||
723 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) | 736 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) |
724 | return -EINVAL; | 737 | return -EINVAL; |
725 | 738 | ||
726 | if (params->nelem_hint) | ||
727 | size = rounded_hashtable_size(params); | ||
728 | |||
729 | memset(ht, 0, sizeof(*ht)); | 739 | memset(ht, 0, sizeof(*ht)); |
730 | mutex_init(&ht->mutex); | 740 | mutex_init(&ht->mutex); |
731 | spin_lock_init(&ht->lock); | 741 | spin_lock_init(&ht->lock); |
@@ -745,6 +755,9 @@ int rhashtable_init(struct rhashtable *ht, | |||
745 | 755 | ||
746 | ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); | 756 | ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); |
747 | 757 | ||
758 | if (params->nelem_hint) | ||
759 | size = rounded_hashtable_size(&ht->p); | ||
760 | |||
748 | /* The maximum (not average) chain length grows with the | 761 | /* The maximum (not average) chain length grows with the |
749 | * size of the hash table, at a rate of (log N)/(log log N). | 762 | * size of the hash table, at a rate of (log N)/(log log N). |
750 | * The value of 16 is selected so that even if the hash | 763 | * The value of 16 is selected so that even if the hash |