Diffstat (limited to 'lib')
-rw-r--r--  lib/btree.c        2
-rw-r--r--  lib/dma-debug.c    4
-rw-r--r--  lib/proportions.c  2
-rw-r--r--  lib/rhashtable.c  67
4 files changed, 44 insertions, 31 deletions
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
  *
  * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
- * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
  *
  * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..d34bd24c2c84 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
 	entry->pfn       = page_to_pfn(virt_to_page(virt));
-	entry->offset    = (size_t) virt & PAGE_MASK;
+	entry->offset    = (size_t) virt & ~PAGE_MASK;
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.type      = dma_debug_coherent,
 		.dev       = dev,
 		.pfn       = page_to_pfn(virt_to_page(virt)),
-		.offset    = (size_t) virt & PAGE_MASK,
+		.offset    = (size_t) virt & ~PAGE_MASK,
 		.dev_addr  = addr,
 		.size      = size,
 		.direction = DMA_BIDIRECTIONAL,
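
Note on the dma-debug hunks above: with the kernel's definition PAGE_MASK == ~(PAGE_SIZE - 1), "virt & PAGE_MASK" yields the page-aligned base address, while "virt & ~PAGE_MASK" yields the offset within the page, which is what the debug entry is meant to record. A minimal userspace sketch of the distinction (assumes 4 KiB pages; PAGE_SIZE/PAGE_MASK are redefined locally here, not taken from kernel headers):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))	/* 0x...fffff000 */

	int main(void)
	{
		uintptr_t virt = 0x12345678;

		/* page-aligned base: 0x12345000 */
		printf("page base:   %#lx\n", (unsigned long)(virt & PAGE_MASK));
		/* intra-page offset: 0x678 -- the value the fix records */
		printf("page offset: %#lx\n", (unsigned long)(virt & ~PAGE_MASK));
		return 0;
	}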
diff --git a/lib/proportions.c b/lib/proportions.c
index 6f724298f67a..efa54f259ea9 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Floating proportions 2 * Floating proportions
3 * 3 *
4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * Description: 6 * Description:
7 * 7 *
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a54ff8949f91..eb9240c458fa 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
 	return false;
 }
 
-int rhashtable_insert_rehash(struct rhashtable *ht)
+int rhashtable_insert_rehash(struct rhashtable *ht,
+			     struct bucket_table *tbl)
 {
 	struct bucket_table *old_tbl;
 	struct bucket_table *new_tbl;
-	struct bucket_table *tbl;
 	unsigned int size;
 	int err;
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	tbl = rhashtable_last_table(ht, old_tbl);
 
 	size = tbl->size;
 
+	err = -EBUSY;
+
 	if (rht_grow_above_75(ht, tbl))
 		size *= 2;
 	/* Do not schedule more than one rehash */
 	else if (old_tbl != tbl)
-		return -EBUSY;
+		goto fail;
+
+	err = -ENOMEM;
 
 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
-	if (new_tbl == NULL) {
-		/* Schedule async resize/rehash to try allocation
-		 * non-atomic context.
-		 */
-		schedule_work(&ht->run_work);
-		return -ENOMEM;
-	}
+	if (new_tbl == NULL)
+		goto fail;
 
 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
 	if (err) {
@@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
 	schedule_work(&ht->run_work);
 
 	return err;
+
+fail:
+	/* Do not fail the insert if someone else did a rehash. */
+	if (likely(rcu_dereference_raw(tbl->future_tbl)))
+		return 0;
+
+	/* Schedule async rehash to retry allocation in process context. */
+	if (err == -ENOMEM)
+		schedule_work(&ht->run_work);
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 
-int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
-			   struct rhash_head *obj,
-			   struct bucket_table *tbl)
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
+					    const void *key,
+					    struct rhash_head *obj,
+					    struct bucket_table *tbl)
 {
 	struct rhash_head *head;
 	unsigned int hash;
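
The new fail: path above means -EBUSY is no longer returned when another rehash is already pending (tbl->future_tbl is set); the caller can simply retry. A hedged sketch of how an inline insert path might consume the new signature (caller-side code and the restart label are assumptions for illustration, not part of this diff):

	err = rhashtable_insert_rehash(ht, tbl);
	if (err)
		return err;	/* genuine failure: -EBUSY or -ENOMEM */
	goto restart;		/* rehash attached or already pending: retry insert */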
@@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 exit:
 	spin_unlock(rht_bucket_lock(tbl, hash));
 
-	return err;
+	if (err == 0)
+		return NULL;
+	else if (err == -EAGAIN)
+		return tbl;
+	else
+		return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
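With the return type change above, rhashtable_insert_slow() now encodes three outcomes in a single pointer: NULL for success, the table pointer for -EAGAIN, and an ERR_PTR for hard errors. A hedged caller sketch (variable names assumed for illustration):

	new_tbl = rhashtable_insert_slow(ht, key, obj, tbl);
	if (!new_tbl)
		return 0;			/* inserted */
	if (IS_ERR(new_tbl))
		return PTR_ERR(new_tbl);	/* hard error */
	tbl = new_tbl;				/* -EAGAIN: retry against this table */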
@@ -503,10 +518,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
 	if (!iter->walker)
 		return -ENOMEM;
 
-	mutex_lock(&ht->mutex);
+	spin_lock(&ht->lock);
 	iter->walker->tbl = rht_dereference(ht->tbl, ht);
 	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	return 0;
 }
@@ -520,10 +535,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
  */
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
-	mutex_lock(&iter->ht->mutex);
+	spin_lock(&iter->ht->lock);
 	if (iter->walker->tbl)
 		list_del(&iter->walker->list);
-	mutex_unlock(&iter->ht->mutex);
+	spin_unlock(&iter->ht->lock);
 	kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -547,14 +562,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 {
 	struct rhashtable *ht = iter->ht;
 
-	mutex_lock(&ht->mutex);
+	rcu_read_lock();
 
+	spin_lock(&ht->lock);
 	if (iter->walker->tbl)
 		list_del(&iter->walker->list);
-
-	rcu_read_lock();
-
-	mutex_unlock(&ht->mutex);
+	spin_unlock(&ht->lock);
 
 	if (!iter->walker->tbl) {
 		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
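
The three walker hunks above move the walker list from ht->mutex to the ht->lock spinlock and take rcu_read_lock() before touching the list, so starting a walk no longer sleeps. A hedged usage sketch of the walker API these functions implement (rhashtable_walk_next/rhashtable_walk_stop are assumed from the same API family and do not appear in this diff; error handling elided):

	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_init(ht, &iter);	/* registers walker under ht->lock */
	rhashtable_walk_start(&iter);		/* enters the RCU read-side section */
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;		/* -EAGAIN: table resized mid-walk */
		/* use obj */
	}
	rhashtable_walk_stop(&iter);		/* leaves the RCU read-side section */
	rhashtable_walk_exit(&iter);		/* unregisters and frees the walker */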
@@ -723,9 +736,6 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
 		return -EINVAL;
 
-	if (params->nelem_hint)
-		size = rounded_hashtable_size(params);
-
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
 	spin_lock_init(&ht->lock);
@@ -745,6 +755,9 @@ int rhashtable_init(struct rhashtable *ht,
 
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
+	if (params->nelem_hint)
+		size = rounded_hashtable_size(&ht->p);
+
 	/* The maximum (not average) chain length grows with the
 	 * size of the hash table, at a rate of (log N)/(log log N).
 	 * The value of 16 is selected so that even if the hash
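
The last two hunks move the nelem_hint sizing until after ht->p has been filled in and clamped, and pass &ht->p instead of the raw params, so the computed size respects the HASH_MIN_SIZE-adjusted min_size. A sketch of what the helper plausibly computes (body assumed from context, not shown in this diff): pad the hint for a ~75% load factor, round up to a power of two, and clamp to min_size:

	static size_t rounded_hashtable_size(const struct rhashtable_params *params)
	{
		/* hint * 4/3 targets ~75% occupancy before a grow is needed */
		return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			   (unsigned long)params->min_size);
	}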