Diffstat (limited to 'lib')
 -rw-r--r--  lib/btree.c        2
 -rw-r--r--  lib/dma-debug.c    4
 -rw-r--r--  lib/proportions.c  2
 -rw-r--r--  lib/rhashtable.c  70
 4 files changed, 46 insertions, 32 deletions
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
  *
  * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..d34bd24c2c84 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
 	entry->pfn       = page_to_pfn(virt_to_page(virt));
-	entry->offset    = (size_t) virt & PAGE_MASK;
+	entry->offset    = (size_t) virt & ~PAGE_MASK;
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.type           = dma_debug_coherent,
 		.dev            = dev,
 		.pfn            = page_to_pfn(virt_to_page(virt)),
-		.offset         = (size_t) virt & PAGE_MASK,
+		.offset         = (size_t) virt & ~PAGE_MASK,
 		.dev_addr       = addr,
 		.size           = size,
 		.direction      = DMA_BIDIRECTIONAL,
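
The dma-debug hunks above swap PAGE_MASK for ~PAGE_MASK when recording where a coherent allocation sits inside its page: masking with PAGE_MASK keeps the page-aligned bits (the page base), while masking with ~PAGE_MASK keeps the low bits (the offset into the page), which is what the entry's offset field is meant to hold. A minimal user-space sketch of the difference; the 4 KiB page size and the sample address are assumptions chosen purely for illustration:

#include <stdio.h>

/* Illustrative only: assume 4 KiB pages, as on most x86 configurations. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long virt = 0x12345678UL;   /* arbitrary sample address */

	printf("page base:   %#lx\n", virt & PAGE_MASK);    /* 0x12345000 */
	printf("page offset: %#lx\n", virt & ~PAGE_MASK);   /* 0x678      */
	return 0;
}
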
diff --git a/lib/proportions.c b/lib/proportions.c
index 6f724298f67a..efa54f259ea9 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
 /*
  * Floating proportions
  *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * Description:
  *
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a54ff8949f91..51282f579760 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
 	return false;
 }
 
-int rhashtable_insert_rehash(struct rhashtable *ht)
+int rhashtable_insert_rehash(struct rhashtable *ht,
+			     struct bucket_table *tbl)
 {
 	struct bucket_table *old_tbl;
 	struct bucket_table *new_tbl;
-	struct bucket_table *tbl;
 	unsigned int size;
 	int err;
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	tbl = rhashtable_last_table(ht, old_tbl);
 
 	size = tbl->size;
 
+	err = -EBUSY;
+
 	if (rht_grow_above_75(ht, tbl))
 		size *= 2;
 	/* Do not schedule more than one rehash */
 	else if (old_tbl != tbl)
-		return -EBUSY;
+		goto fail;
+
+	err = -ENOMEM;
 
 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
-	if (new_tbl == NULL) {
-		/* Schedule async resize/rehash to try allocation
-		 * non-atomic context.
-		 */
-		schedule_work(&ht->run_work);
-		return -ENOMEM;
-	}
+	if (new_tbl == NULL)
+		goto fail;
 
 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
 	if (err) {
@@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
 		schedule_work(&ht->run_work);
 
 	return err;
+
+fail:
+	/* Do not fail the insert if someone else did a rehash. */
+	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+		return 0;
+
+	/* Schedule async rehash to retry allocation in process context. */
+	if (err == -ENOMEM)
+		schedule_work(&ht->run_work);
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
-			   struct rhash_head *obj,
-			   struct bucket_table *tbl)
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+					    const void *key,
+					    struct rhash_head *obj,
+					    struct bucket_table *tbl)
 {
 	struct rhash_head *head;
 	unsigned int hash;
@@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 exit:
 	spin_unlock(rht_bucket_lock(tbl, hash));
 
-	return err;
+	if (err == 0)
+		return NULL;
+	else if (err == -EAGAIN)
+		return tbl;
+	else
+		return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
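With these two hunks, rhashtable_insert_slow() no longer returns a bare error code: NULL means the insert succeeded, a bucket_table pointer means the table needs rehashing before the insert can fit (the -EAGAIN case), and anything else is an ERR_PTR()-encoded error. Paired with rhashtable_insert_rehash() now taking the table to rehash as an argument, a caller can drive the retry itself. The sketch below is a hypothetical caller, not the real fast-path inline code; the simple retry-on-the-same-table loop is an illustrative simplification of what the actual caller does:

#include <linux/err.h>
#include <linux/rhashtable.h>

/* Hypothetical caller sketch (illustrative only): retry the slow-path
 * insert while it keeps handing back a table that needs rehashing.
 */
static int example_insert(struct rhashtable *ht, const void *key,
			  struct rhash_head *obj, struct bucket_table *tbl)
{
	struct bucket_table *retry_tbl;
	int err;

	for (;;) {
		retry_tbl = rhashtable_insert_slow(ht, key, obj, tbl);
		if (!retry_tbl)
			return 0;                   /* inserted */
		if (IS_ERR(retry_tbl))
			return PTR_ERR(retry_tbl);  /* hard failure, give up */

		/* -EAGAIN case: ask for a grow/rehash of that table.  A zero
		 * return also covers "someone else already attached a future
		 * table", so the insert should simply be retried.
		 */
		err = rhashtable_insert_rehash(ht, retry_tbl);
		if (err)
			return err;

		tbl = retry_tbl;  /* simplification: the real caller re-reads ht->tbl */
	}
}
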
@@ -503,10 +518,11 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
 	if (!iter->walker)
 		return -ENOMEM;
 
-	mutex_lock(&ht->mutex);
-	iter->walker->tbl = rht_dereference(ht->tbl, ht);
+	spin_lock(&ht->lock);
+	iter->walker->tbl =
+		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
 	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	return 0;
 }
@@ -520,10 +536,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
  */
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
-	mutex_lock(&iter->ht->mutex);
+	spin_lock(&iter->ht->lock);
 	if (iter->walker->tbl)
 		list_del(&iter->walker->list);
-	mutex_unlock(&iter->ht->mutex);
+	spin_unlock(&iter->ht->lock);
 	kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -547,14 +563,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 {
 	struct rhashtable *ht = iter->ht;
 
-	mutex_lock(&ht->mutex);
+	rcu_read_lock();
 
+	spin_lock(&ht->lock);
 	if (iter->walker->tbl)
 		list_del(&iter->walker->list);
-
-	rcu_read_lock();
-
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	if (!iter->walker->tbl) {
 		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
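The three walker hunks move protection of the walkers list from ht->mutex to the ht->lock spinlock, so registering and unregistering an iterator no longer serializes against a resize holding the mutex; rhashtable_walk_start() now only takes the RCU read lock plus that spinlock briefly. For context, a rough lifecycle sketch of the walker API follows; the rhashtable_walk_next()/rhashtable_walk_stop() calls and the element type are assumptions beyond what this diff shows:

#include <linux/err.h>
#include <linux/rhashtable.h>

/* Hypothetical element type for illustration. */
struct my_obj {
	u32 key;
	struct rhash_head node;
};

/* Rough walker lifecycle sketch (error handling trimmed for brevity). */
static void example_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;
	int err;

	err = rhashtable_walk_init(ht, &iter);  /* registers the walker under ht->lock */
	if (err)
		return;

	err = rhashtable_walk_start(&iter);     /* rcu_read_lock() + ht->lock */
	if (err && err != -EAGAIN)
		goto out;

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;               /* -EAGAIN: table was resized, keep going */
		/* use obj here, under RCU */
	}

	rhashtable_walk_stop(&iter);            /* drops rcu_read_lock() */
out:
	rhashtable_walk_exit(&iter);            /* unregisters the walker under ht->lock */
}
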
@@ -723,9 +737,6 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
 		return -EINVAL;
 
-	if (params->nelem_hint)
-		size = rounded_hashtable_size(params);
-
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
 	spin_lock_init(&ht->lock);
@@ -745,6 +756,9 @@ int rhashtable_init(struct rhashtable *ht,
 
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
+	if (params->nelem_hint)
+		size = rounded_hashtable_size(&ht->p);
+
 	/* The maximum (not average) chain length grows with the
 	 * size of the hash table, at a rate of (log N)/(log log N).
 	 * The value of 16 is selected so that even if the hash
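
The last two hunks move the nelem_hint sizing below the point where ht->p has been filled in and ht->p.min_size clamped to HASH_MIN_SIZE, and pass &ht->p instead of the raw caller params. That ordering matters if the sizing helper clamps against min_size; a plausible sketch of such a helper is below (the exact body is an assumption, only its use of nelem_hint and min_size is implied by this change, and it relies on the kernel's max() and roundup_pow_of_two() helpers):

/* Plausible sketch of the sizing helper; the real rounded_hashtable_size()
 * may differ in detail.  Rounding the hint up for a ~75% load factor and
 * clamping against min_size is why it must see ht->p only after min_size
 * has been raised to HASH_MIN_SIZE above.
 */
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}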
