author		David S. Miller <davem@davemloft.net>	2014-08-02 22:49:47 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-02 22:49:47 -0400
commit		bae2e81a69c3f0e93707b36a5a655ca0e365a78d
tree		09f0da1fdd48a931ecded6a31d54bf3a07e8febb /lib
parent		d39a9ffce7f14b494391da982b8cefa311dae0f6
parent		cfe4a9dda034e2b5b6ba0b6313b65dfb89ee451c
Merge branch 'concurrent_hash_tables'
Thomas Graf says:
====================
Lockless netlink_lookup() with new concurrent hash table
Netlink sockets are maintained in a hash table to allow efficient lookup
via the port ID for unicast messages. However, lookups currently require
a read lock to be taken. This series adds a new generic, resizable,
scalable, concurrent hash table based on the paper referenced in the first
patch. It then makes use of the new data type to implement lockless
netlink_lookup().
Patch 3/3 to convert nft_hash is included for reference but should be
merged via the netfilter tree. Inclusion in this series is to provide
context for the suggested API.
This series is against net-next, since the initial user of the new hash table is in net/.
Changes:
v4-v5:
- use GFP_KERNEL to alloc Netlink buckets as suggested by Nikolay
Aleksandrov
- free nft hash element on removal as spotted by Nikolay Aleksandrov
and Patrick McHardy
v3-v4:
- fixed wrong shift assignment placement as spotted by Nikolay Aleksandrov
- reverted default size of nft_hash to 4 as requested by Patrick McHardy,
default size for other hash tables remains at 64 if no hint is given
- fixed copyright as requested by Patrick McHardy
v2-v3:
- fixed typo in nft_hash_destroy() when passing rhashtable handle
v1-v2:
- fixed traversal off-by-one as spotted by Tobias Klauser
- removed unlikely() from BUG_ON() as spotted by Josh Triplett
- new 3rd patch to convert nft_hash to rhashtable
- make rhashtable_insert() return void
- nl_sk_hash_lock must be a mutex
- fixed wrong name of rht_shrink_below_30()
- exported symbols rht_grow_above_75() and rht_shrink_below_30()
- allow table freeing with RCU callback
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
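
For orientation before the diff, here is a minimal usage sketch of the API this series introduces. It mirrors the fixed-length-key example from the rhashtable_init() kernel-doc further below; struct my_obj, my_mutex_is_held() and my_example() are illustrative names, not part of the patch:

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static int my_mutex_is_held(void)
{
	return 1;	/* a real user would test its update-side mutex */
}

static int my_example(void)
{
	struct rhashtable_params params = {
		.head_offset	 = offsetof(struct my_obj, node),
		.key_offset	 = offsetof(struct my_obj, key),
		.key_len	 = sizeof(u32),
		.hashfn		 = arch_fast_hash,
		.mutex_is_held	 = &my_mutex_is_held,
		.grow_decision	 = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	struct rhashtable ht;
	struct my_obj *obj;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = key;

	/* Mutations require the update-side mutex to be held. */
	rhashtable_insert(&ht, &obj->node, GFP_KERNEL);

	/* Lookups are lockless; they only need rcu_read_lock(). */
	rcu_read_lock();
	err = rhashtable_lookup(&ht, &key) ? 0 : -ENOENT;
	rcu_read_unlock();

	rhashtable_remove(&ht, &obj->node, GFP_KERNEL);
	kfree(obj);
	rhashtable_destroy(&ht);

	return err;
}
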
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	8
-rw-r--r--	lib/Makefile	2
-rw-r--r--	lib/rhashtable.c	797
3 files changed, 806 insertions, 1 deletion
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7a638aa3545b..f11a2e8f6157 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1550,6 +1550,14 @@ config TEST_STRING_HELPERS
 config TEST_KSTRTOX
 	tristate "Test kstrto*() family of functions at runtime"
 
+config TEST_RHASHTABLE
+	bool "Perform selftest on resizable hash table"
+	default n
+	help
+	  Enable this option to test the rhashtable functions at boot.
+
+	  If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index ba967a19edba..fd248e4c05ad 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o
+	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000000..e6940cf16628
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,797 @@
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
	return ht->p.mutex_is_held();
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif

/**
 * rht_obj - cast hash head to outer object
 * @ht: hash table
 * @he: hashed node
 */
void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}
EXPORT_SYMBOL_GPL(rht_obj);

static u32 __hashfn(const struct rhashtable *ht, const void *key,
		    u32 len, u32 hsize)
{
	u32 h;

	h = ht->p.hashfn(key, len, ht->p.hash_rnd);

	return h & (hsize - 1);
}

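/* Illustrative note: bucket counts are always powers of two here (see
 * rounded_hashtable_size() and the expand/shrink paths), so masking
 * with (hsize - 1) is equivalent to h % hsize without the division.
 */
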
/**
 * rhashtable_hashfn - compute hash for key of given length
 * @ht: hash table to compute for
 * @key: pointer to key
 * @len: length of key
 *
 * Computes the hash value using the hash function provided in the 'hashfn'
 * of struct rhashtable_params. The returned value is guaranteed to be
 * smaller than the number of buckets in the hash table.
 */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);

static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
	if (unlikely(!ht->p.key_len)) {
		u32 h;

		h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);

		return h & (hsize - 1);
	}

	return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}

/**
 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht: hash table to compute for
 * @ptr: pointer to hashed object
 *
 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 * on whether the hash table is set up to work with a fixed length key. The
 * returned value is guaranteed to be smaller than the number of buckets in
 * the hash table.
 */
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct rhash_head *he, u32 hsize)
{
	return obj_hashfn(ht, rht_obj(ht, he), hsize);
}

static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
{
	struct bucket_table *tbl;
	size_t size;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, flags);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	kvfree(tbl);
}
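
/* Illustrative note: for typical sizes the kzalloc() above succeeds
 * (e.g. 64 buckets of pointer size plus a small header is well under a
 * page); the vzalloc() fallback matters only for very large tables,
 * where physically contiguous memory may not be available.
 */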

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
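
/* Worked example (illustrative): with new_size == 64, the table grows
 * once nelems exceeds 48 (64 / 4 * 3) and shrinks once nelems drops
 * below 19 (64 * 3 / 10, integer division), i.e. at 18 elements.
 */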

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl, size_t n)
{
	struct rhash_head *he, *p, *next;
	unsigned int h;

	/* Old bucket empty, no work needed. */
	p = rht_dereference(old_tbl->buckets[n], ht);
	if (!p)
		return;

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same new bucket as
	 * the previous node p; p then points at the last node of the
	 * leading sub-chain.
	 */
	h = head_hashfn(ht, p, new_tbl->size);
	rht_for_each(he, p->next, ht) {
		if (head_hashfn(ht, he, new_tbl->size) != h)
			break;
		p = he;
	}
	RCU_INIT_POINTER(old_tbl->buckets[n], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or NULL if no such node exists.
	 */
	next = NULL;
	if (he) {
		rht_for_each(he, he->next, ht) {
			if (head_hashfn(ht, he, new_tbl->size) == h) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	RCU_INIT_POINTER(p->next, next);
}
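
/* Worked example (illustrative): when growing from 2 to 4 buckets,
 * old bucket 1 may hold the chain A(new hash 1) -> B(3) -> C(1).
 * rhashtable_expand() points new bucket 1 at A and new bucket 3 at B,
 * but both new chains still run through all three nodes. The first
 * unzip pass advances old bucket 1 to B and rewires A -> C; the next
 * pass terminates B's chain; once the old bucket reads NULL, the
 * chains are fully separated and the expansion loop completes.
 */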

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 * @flags: allocation flags
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int i, h;
	bool complete;

	ASSERT_RHT_MUTEX(ht);

	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
		return 0;

	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
	if (new_tbl == NULL)
		return -ENOMEM;

	ht->shift++;

	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
	 * table, but with multiple buckets "zipped" together into a
	 * single imprecise chain.
	 */
	for (i = 0; i < new_tbl->size; i++) {
		h = i & (old_tbl->size - 1);
		rht_for_each(he, old_tbl->buckets[h], ht) {
			if (head_hashfn(ht, he, new_tbl->size) == i) {
				RCU_INIT_POINTER(new_tbl->buckets[i], he);
				break;
			}
		}
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	do {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (i = 0; i < old_tbl->size; i++) {
			hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
			if (old_tbl->buckets[i] != NULL)
				complete = false;
		}
	} while (!complete);

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 * @flags: allocation flags
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	unsigned int i;

	ASSERT_RHT_MUTEX(ht);

	if (tbl->size <= HASH_MIN_SIZE)
		return 0;

	ntbl = bucket_table_alloc(tbl->size / 2, flags);
	if (ntbl == NULL)
		return -ENOMEM;

	ht->shift--;

	/* Link each bucket in the new table to the first bucket
	 * in the old table that contains entries which will hash
	 * to the new bucket.
	 */
	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		/* Walk to the end of that chain and append the chain
		 * from the second old bucket which folds onto this
		 * new bucket, i.e. old bucket i + ntbl->size.
		 */
		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &rht_dereference(*pprev, ht)->next)
			;
		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, ntbl);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
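
/* Worked example (illustrative): when shrinking from 8 to 4 buckets,
 * new bucket 1 takes old bucket 1's chain and appends old bucket 5's
 * chain, since every hash h with (h & 7) == 1 or (h & 7) == 5
 * satisfies (h & 3) == 1.
 */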

/**
 * rhashtable_insert - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @flags: allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	u32 hash;

	ASSERT_RHT_MUTEX(ht);

	hash = head_hashfn(ht, obj, tbl->size);
	RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
	rcu_assign_pointer(tbl->buckets[hash], obj);
	ht->nelems++;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

/**
 * rhashtable_remove_pprev - remove object from hash table given previous element
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @pprev: pointer to previous element
 * @flags: allocation flags (table shrinking)
 *
 * Identical to rhashtable_remove() but the caller is already aware of the
 * element in front of the element to be deleted. This is particularly
 * useful for deletion when combined with walking or lookup.
 */
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
			     struct rhash_head **pprev, gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	RCU_INIT_POINTER(*pprev, obj->next);
	ht->nelems--;

	if (ht->p.shrink_decision &&
	    ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);

/**
 * rhashtable_remove - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @flags: allocation flags (table shrinking)
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	u32 h;

	ASSERT_RHT_MUTEX(ht);

	h = head_hashfn(ht, obj, tbl->size);

	pprev = &tbl->buckets[h];
	rht_for_each(he, tbl->buckets[h], ht) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rhashtable_remove_pprev(ht, he, pprev, flags);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht: hash table
 * @key: pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;
	u32 h;

	BUG_ON(!ht->p.key_len);

	h = __hashfn(ht, key, ht->p.key_len, tbl->size);
	rht_for_each_rcu(he, tbl->buckets[h], ht) {
		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
			   ht->p.key_len))
			continue;
		return rht_obj(ht, he);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht: hash table
 * @hash: hash value of desired entry
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;

	if (unlikely(hash >= tbl->size))
		return NULL;

	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		return rht_obj(ht, he);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
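
/* Sketch of a caller-supplied compare function (illustrative; struct
 * my_obj and my_cmp are made-up names, but the netlink conversion in
 * this series uses the same pattern to match on the socket port ID):
 *
 *	static bool my_cmp(void *obj, void *arg)
 *	{
 *		return ((struct my_obj *)obj)->key == *(u32 *)arg;
 *	}
 *
 * Under rcu_read_lock(), a lookup then reads:
 *
 *	obj = rhashtable_lookup_compare(&ht, hash, my_cmp, &key);
 */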

static size_t rounded_hashtable_size(unsigned int nelem)
{
	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
}
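
/* Worked example (illustrative): a nelem_hint of 100 gives
 * roundup_pow_of_two(133) == 256 buckets, so the initial load of
 * 100/256 (~39%) stays safely below the 75% grow threshold.
 */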

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = arch_fast_hash,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = arch_fast_hash,
 *	.obj_hashfn = my_hash_fn,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params->nelem_hint);

	tbl = bucket_table_alloc(size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	memset(ht, 0, sizeof(*ht));
	ht->shift = ilog2(tbl->size);
	memcpy(&ht->p, params, sizeof(*params));
	RCU_INIT_POINTER(ht->tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array.
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
	const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	bucket_table_free(tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

static int test_mutex_is_held(void)
{
	return 1;
}

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht,
			      struct bucket_table *tbl,
			      bool quiet)
{
	unsigned int cnt, i, total = 0;
	struct test_obj *obj;

	for (i = 0; i < tbl->size; i++) {
		cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		if (!quiet)
			pr_cont("\n [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
		total, ht->nelems, TEST_ENTRIES);
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info(" Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
	}

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	test_bucket_stats(ht, tbl, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table expansion iteration %u...\n", i);
		rhashtable_expand(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table shrinkage iteration %u...\n", i);
		rhashtable_shrink(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	pr_info(" Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = arch_fast_hash,
		.mutex_is_held = &test_mutex_is_held,
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */
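
To exercise the selftest added by this patch, build the kernel with the new option enabled; the test then runs from subsys_initcall() during boot and reports via the pr_info()/pr_warn() lines above:

	CONFIG_TEST_RHASHTABLE=y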