author     Thomas Gleixner <tglx@linutronix.de>  2016-09-26 15:47:03 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2016-09-26 15:47:03 -0400
commit     1e1b37273cf719545da50b76f214f983a710aaf4 (patch)
tree       033f6062325ef7aaeefe8559bb409ab7d2be3c76 /lib/rhashtable.c
parent     c183a603e8d8a5a189729b77d0c623a3d5950e5f (diff)
parent     c291b015158577be533dd5a959dfc09bab119eed (diff)
Merge branch 'x86/urgent' into x86/apic
Bring in the upstream modifications so that we can fix up the silent merge conflict introduced by this merge.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..56054e541a0f 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -30,7 +30,7 @@
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
-#define BUCKET_LOCKS_PER_CPU	128UL
+#define BUCKET_LOCKS_PER_CPU	32UL
 
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
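
Note: this hunk works in tandem with the nr_pcpus cap raised in the next one. Assuming locks_mul takes its default of BUCKET_LOCKS_PER_CPU, the pre-rounding worst case drops from 32 CPUs * 128 locks = 4096 spinlocks to 64 CPUs * 32 locks = 2048, halving the largest possible lock-array allocation while letting more CPUs contribute to the count.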
@@ -70,21 +70,25 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 	unsigned int nr_pcpus = num_possible_cpus();
 #endif
 
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
 	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 
 	/* Never allocate more than 0.5 locks per bucket */
 	size = min_t(unsigned int, size, tbl->size >> 1);
 
 	if (sizeof(spinlock_t) != 0) {
+		tbl->locks = NULL;
 #ifdef CONFIG_NUMA
 		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
 		    gfp == GFP_KERNEL)
 			tbl->locks = vmalloc(size * sizeof(spinlock_t));
-		else
 #endif
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
+		if (gfp != GFP_KERNEL)
+			gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
+		if (!tbl->locks)
+			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+						   gfp);
 		if (!tbl->locks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
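
Sketched below is the allocation pattern this hunk introduces, as a self-contained userspace C model rather than kernel code: PAGE_SIZE, the GFP_* values, and the big_alloc()/array_alloc() helpers are hypothetical stand-ins for the real page size, the GFP bits, vmalloc() and kmalloc_array().

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE    4096u
#define GFP_KERNEL   0x1u	/* hypothetical stand-ins for the GFP bits */
#define GFP_ATOMIC   0x2u
#define GFP_NOWARN   0x4u
#define GFP_NORETRY  0x8u

/* Hypothetical stand-in for vmalloc(): pretend it only serves large areas. */
static void *big_alloc(size_t bytes)
{
	return bytes > PAGE_SIZE ? calloc(1, bytes) : NULL;
}

/* Hypothetical stand-in for kmalloc_array(): overflow-checked n * size. */
static void *array_alloc(size_t n, size_t size, unsigned int gfp)
{
	(void)gfp;				/* flags unused in this model */
	if (size && n > (size_t)-1 / size)
		return NULL;
	return calloc(n, size);
}

/* The control flow the hunk introduces: clear the pointer first, try the
 * large-area allocator when the request is big, soften the flags for
 * atomic callers, then fall back only if nothing was assigned. */
static void *alloc_locks(size_t nlocks, size_t lock_size, unsigned int gfp)
{
	void *locks = NULL;			/* mirrors tbl->locks = NULL */

	if (nlocks * lock_size > PAGE_SIZE && gfp == GFP_KERNEL)
		locks = big_alloc(nlocks * lock_size);

	if (gfp != GFP_KERNEL)			/* atomic caller: fail fast */
		gfp |= GFP_NOWARN | GFP_NORETRY;

	if (!locks)				/* fall back only when unset */
		locks = array_alloc(nlocks, lock_size, gfp);
	return locks;
}

int main(void)
{
	void *p = alloc_locks(2048, sizeof(int), GFP_ATOMIC);

	printf("lock array: %s\n", p ? "allocated" : "failed");
	free(p);
	return 0;
}

Initialising the pointer up front lets the CONFIG_NUMA vmalloc() attempt and the kmalloc_array() fallback share a single if (!tbl->locks) test, replacing the old else that dangled across the #endif; adding __GFP_NOWARN | __GFP_NORETRY for non-GFP_KERNEL callers makes a failed atomic allocation quiet and quick rather than noisy and insistent.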
@@ -321,12 +325,14 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-	unsigned int size;
+	unsigned int nelems = atomic_read(&ht->nelems);
+	unsigned int size = 0;
 	int err;
 
 	ASSERT_RHT_MUTEX(ht);
 
-	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+	if (nelems)
+		size = roundup_pow_of_two(nelems * 3 / 2);
 	if (size < ht->p.min_size)
 		size = ht->p.min_size;
 
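
The if (nelems) guard matters because roundup_pow_of_two() is typically built as 1UL << fls(n - 1): an input of 0 underflows to an all-ones value and requests a shift by the full word width, which is undefined. A minimal userspace model of that hazard and of the patched sizing logic (roundup_p2 is an assumed simplification, not the kernel's exact macro):

#include <stdio.h>

/* Model of the "1UL << fls(n - 1)" construction.  For n == 0 the
 * subtraction underflows to all ones and the shift below would be by
 * the full word width, which is undefined behavior. */
static unsigned long roundup_p2(unsigned long n)
{
	unsigned long v = n - 1;
	int bits = 0;

	while (v) {		/* count up to the highest set bit */
		v >>= 1;
		bits++;
	}
	return 1UL << bits;
}

int main(void)
{
	unsigned int nelems = 0, min_size = 4, size = 0;

	if (nelems)		/* the guard the hunk adds */
		size = (unsigned int)roundup_p2(nelems * 3 / 2);
	if (size < min_size)	/* clamp to the configured minimum */
		size = min_size;

	printf("shrink target: %u buckets\n", size);	/* prints 4 */
	return 0;
}

With the guard in place an empty table shrinks to ht->p.min_size instead of feeding 0 into the rounding helper.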