author		David S. Miller <davem@davemloft.net>	2017-12-11 09:58:39 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-11 09:58:39 -0500
commit		9944a0f2f502e4501fccb1dc0a64a6012c83dd97 (patch)
tree		98769c3324dd57bc2070c08852c9bf1f272ba2b8 /lib/bucket_locks.c
parent		a0b586fa75a69578ecf10b40582eed9b35de2432 (diff)
parent		64e0cd0d3540dbbdf6661943025409e6b31d5178 (diff)
Merge branch 'rhashtable-New-features-in-walk-and-bucket'
Tom Herbert says:
====================
rhashtable: New features in walk and bucket
This patch set contains some changes related to rhashtable:
- Allow rhashtable_walk_start to return void
- Add a function to peek at the next entry during a walk
- Abstract out a function to compute the hash for a table
- Add a library function to allocate a bucket array of spinlocks
- Call the above function for rhashtable lock allocation
Tested: Exercised using various operations on an ILA xlat
table.
v2:
- Apply feedback from Herbert. Don't change the semantics of resize
  event reporting and -EAGAIN, just simplify the API for callers that
  ignore those.
- Add end_of_table in iter to reliably tell when the iterator has
  reached the end.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
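
[Editor's note: for context, a minimal sketch of a table walk under the
reworked API described above. struct my_obj and my_walk() are hypothetical
names for illustration; the points taken from this series are that
rhashtable_walk_start() now returns void, that rhashtable_walk_peek() can
inspect the next entry without advancing, and that a concurrent resize
still surfaces as ERR_PTR(-EAGAIN) from rhashtable_walk_next().]

#include <linux/rhashtable.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

static void my_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* returns void after this series */

	/* New in this series: look at the next entry without advancing. */
	obj = rhashtable_walk_peek(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;	/* -EAGAIN: the table was resized */
		pr_info("key=%u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}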
Diffstat (limited to 'lib/bucket_locks.c')
-rw-r--r--	lib/bucket_locks.c	54
1 file changed, 54 insertions, 0 deletions
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000000..266a97c5708b
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
 * indicate the number of elements to allocate in the array. max_size
 * gives the maximum number of elements to allocate. cpu_mult gives
 * the number of locks per CPU to allocate. The size is rounded up
 * to a power of 2 to be suitable as a hash table.
 */

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
{
	spinlock_t *tlocks = NULL;
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	if (cpu_mult) {
		nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
		size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
	} else {
		size = max_size;
	}

	if (sizeof(spinlock_t) != 0) {
		if (gfpflags_allow_blocking(gfp))
			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
		else
			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
		if (!tlocks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tlocks[i]);
	}

	*locks = tlocks;
	*locks_mask = size - 1;

	return 0;
}
EXPORT_SYMBOL(alloc_bucket_spinlocks);

void free_bucket_spinlocks(spinlock_t *locks)
{
	kvfree(locks);
}
EXPORT_SYMBOL(free_bucket_spinlocks);
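
[Editor's note: a short sketch of how a hash table implementation might
consume these helpers. struct my_table, my_table_init(), and the sizes
chosen here are hypothetical; the signatures are those of the new file
above. alloc_bucket_spinlocks() hands back the lock array together with
locks_mask (size - 1), so mapping a hash to its bucket lock is a single
AND.]

#include <linux/spinlock.h>

struct my_table {
	spinlock_t *bucket_locks;
	unsigned int lock_mask;
};

static int my_table_init(struct my_table *t)
{
	/* At most 1024 locks, 4 per possible CPU. GFP_KERNEL allows
	 * blocking, so the helper is free to fall back to kvmalloc().
	 */
	return alloc_bucket_spinlocks(&t->bucket_locks, &t->lock_mask,
				      1024, 4, GFP_KERNEL);
}

static void my_table_lock_bucket(struct my_table *t, u32 hash)
{
	spin_lock(&t->bucket_locks[hash & t->lock_mask]);
}

static void my_table_destroy(struct my_table *t)
{
	free_bucket_spinlocks(t->bucket_locks);
}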