Diffstat (limited to 'lib/xarray.c')
-rw-r--r-- | lib/xarray.c | 2036
1 files changed, 2036 insertions, 0 deletions
diff --git a/lib/xarray.c b/lib/xarray.c
new file mode 100644
index 000000000000..8b176f009c08
--- /dev/null
+++ b/lib/xarray.c
@@ -0,0 +1,2036 @@
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
2 | /* | ||
3 | * XArray implementation | ||
4 | * Copyright (c) 2017 Microsoft Corporation | ||
5 | * Author: Matthew Wilcox <willy@infradead.org> | ||
6 | */ | ||
7 | |||
8 | #include <linux/bitmap.h> | ||
9 | #include <linux/export.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/xarray.h> | ||
13 | |||
14 | /* | ||
15 | * Coding conventions in this file: | ||
16 | * | ||
17 | * @xa is used to refer to the entire xarray. | ||
18 | * @xas is the 'xarray operation state'. It may be either a pointer to | ||
19 | * an xa_state, or an xa_state stored on the stack. This is an unfortunate | ||
20 | * ambiguity. | ||
21 | * @index is the index of the entry being operated on. | ||
22 | * @mark is an xa_mark_t; a small number indicating one of the mark bits. | ||
23 | * @node refers to an xa_node; usually the primary one being operated on by | ||
24 | * this function. | ||
25 | * @offset is the index into the slots array inside an xa_node. | ||
26 | * @parent refers to the @xa_node closer to the head than @node. | ||
27 | * @entry refers to something stored in a slot in the xarray. | ||
28 | */ | ||
29 | |||
30 | static inline unsigned int xa_lock_type(const struct xarray *xa) | ||
31 | { | ||
32 | return (__force unsigned int)xa->xa_flags & 3; | ||
33 | } | ||
34 | |||
35 | static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) | ||
36 | { | ||
37 | if (lock_type == XA_LOCK_IRQ) | ||
38 | xas_lock_irq(xas); | ||
39 | else if (lock_type == XA_LOCK_BH) | ||
40 | xas_lock_bh(xas); | ||
41 | else | ||
42 | xas_lock(xas); | ||
43 | } | ||
44 | |||
45 | static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) | ||
46 | { | ||
47 | if (lock_type == XA_LOCK_IRQ) | ||
48 | xas_unlock_irq(xas); | ||
49 | else if (lock_type == XA_LOCK_BH) | ||
50 | xas_unlock_bh(xas); | ||
51 | else | ||
52 | xas_unlock(xas); | ||
53 | } | ||
54 | |||
55 | static inline bool xa_track_free(const struct xarray *xa) | ||
56 | { | ||
57 | return xa->xa_flags & XA_FLAGS_TRACK_FREE; | ||
58 | } | ||
59 | |||
60 | static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) | ||
61 | { | ||
62 | if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) | ||
63 | xa->xa_flags |= XA_FLAGS_MARK(mark); | ||
64 | } | ||
65 | |||
66 | static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark) | ||
67 | { | ||
68 | if (xa->xa_flags & XA_FLAGS_MARK(mark)) | ||
69 | xa->xa_flags &= ~(XA_FLAGS_MARK(mark)); | ||
70 | } | ||
71 | |||
72 | static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark) | ||
73 | { | ||
74 | return node->marks[(__force unsigned)mark]; | ||
75 | } | ||
76 | |||
77 | static inline bool node_get_mark(struct xa_node *node, | ||
78 | unsigned int offset, xa_mark_t mark) | ||
79 | { | ||
80 | return test_bit(offset, node_marks(node, mark)); | ||
81 | } | ||
82 | |||
83 | /* returns true if the bit was already set */ | ||
84 | static inline bool node_set_mark(struct xa_node *node, unsigned int offset, | ||
85 | xa_mark_t mark) | ||
86 | { | ||
87 | return __test_and_set_bit(offset, node_marks(node, mark)); | ||
88 | } | ||
89 | |||
90 | /* returns true if the bit was set */ | ||
91 | static inline bool node_clear_mark(struct xa_node *node, unsigned int offset, | ||
92 | xa_mark_t mark) | ||
93 | { | ||
94 | return __test_and_clear_bit(offset, node_marks(node, mark)); | ||
95 | } | ||
96 | |||
97 | static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark) | ||
98 | { | ||
99 | return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE); | ||
100 | } | ||
101 | |||
102 | static inline void node_mark_all(struct xa_node *node, xa_mark_t mark) | ||
103 | { | ||
104 | bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE); | ||
105 | } | ||
106 | |||
107 | #define mark_inc(mark) do { \ | ||
108 | mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \ | ||
109 | } while (0) | ||
110 | |||
111 | /* | ||
112 | * xas_squash_marks() - Merge all marks to the first entry | ||
113 | * @xas: Array operation state. | ||
114 | * | ||
115 | * Set a mark on the first entry if any entry has it set. Clear marks on | ||
116 | * all sibling entries. | ||
117 | */ | ||
118 | static void xas_squash_marks(const struct xa_state *xas) | ||
119 | { | ||
120 | unsigned int mark = 0; | ||
121 | unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; | ||
122 | |||
123 | if (!xas->xa_sibs) | ||
124 | return; | ||
125 | |||
126 | do { | ||
127 | unsigned long *marks = xas->xa_node->marks[mark]; | ||
128 | if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) | ||
129 | continue; | ||
130 | __set_bit(xas->xa_offset, marks); | ||
131 | bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); | ||
132 | } while (mark++ != (__force unsigned)XA_MARK_MAX); | ||
133 | } | ||
134 | |||
135 | /* extracts the offset within this node from the index */ | ||
136 | static unsigned int get_offset(unsigned long index, struct xa_node *node) | ||
137 | { | ||
138 | return (index >> node->shift) & XA_CHUNK_MASK; | ||
139 | } | ||
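/*
 * Worked example (editorial, assuming XA_CHUNK_SHIFT == 6): for index 70
 * in a node with shift 6, (70 >> 6) & 63 == 1, so the walk descends
 * through slot 1 of that node; in its shift-0 child, (70 >> 0) & 63 == 6.
 */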
140 | |||
141 | static void xas_set_offset(struct xa_state *xas) | ||
142 | { | ||
143 | xas->xa_offset = get_offset(xas->xa_index, xas->xa_node); | ||
144 | } | ||
145 | |||
146 | /* move the index either forwards (find) or backwards (sibling slot) */ | ||
147 | static void xas_move_index(struct xa_state *xas, unsigned long offset) | ||
148 | { | ||
149 | unsigned int shift = xas->xa_node->shift; | ||
150 | xas->xa_index &= ~XA_CHUNK_MASK << shift; | ||
151 | xas->xa_index += offset << shift; | ||
152 | } | ||
153 | |||
154 | static void xas_advance(struct xa_state *xas) | ||
155 | { | ||
156 | xas->xa_offset++; | ||
157 | xas_move_index(xas, xas->xa_offset); | ||
158 | } | ||
159 | |||
160 | static void *set_bounds(struct xa_state *xas) | ||
161 | { | ||
162 | xas->xa_node = XAS_BOUNDS; | ||
163 | return NULL; | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * Starts a walk. If the @xas is already valid, we assume that it's on | ||
168 | * the right path and just return where we've got to. If we're in an | ||
169 | * error state, return NULL. If the index is outside the current scope | ||
170 | * of the xarray, return NULL without changing @xas->xa_node. Otherwise | ||
171 | * set @xas->xa_node to NULL and return the current head of the array. | ||
172 | */ | ||
173 | static void *xas_start(struct xa_state *xas) | ||
174 | { | ||
175 | void *entry; | ||
176 | |||
177 | if (xas_valid(xas)) | ||
178 | return xas_reload(xas); | ||
179 | if (xas_error(xas)) | ||
180 | return NULL; | ||
181 | |||
182 | entry = xa_head(xas->xa); | ||
183 | if (!xa_is_node(entry)) { | ||
184 | if (xas->xa_index) | ||
185 | return set_bounds(xas); | ||
186 | } else { | ||
187 | if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK) | ||
188 | return set_bounds(xas); | ||
189 | } | ||
190 | |||
191 | xas->xa_node = NULL; | ||
192 | return entry; | ||
193 | } | ||
194 | |||
195 | static void *xas_descend(struct xa_state *xas, struct xa_node *node) | ||
196 | { | ||
197 | unsigned int offset = get_offset(xas->xa_index, node); | ||
198 | void *entry = xa_entry(xas->xa, node, offset); | ||
199 | |||
200 | xas->xa_node = node; | ||
201 | if (xa_is_sibling(entry)) { | ||
202 | offset = xa_to_sibling(entry); | ||
203 | entry = xa_entry(xas->xa, node, offset); | ||
204 | } | ||
205 | |||
206 | xas->xa_offset = offset; | ||
207 | return entry; | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * xas_load() - Load an entry from the XArray (advanced). | ||
212 | * @xas: XArray operation state. | ||
213 | * | ||
214 | * Usually walks the @xas to the appropriate state to load the entry | ||
215 | * stored at xa_index. However, it will do nothing and return %NULL if | ||
216 | * @xas is in an error state. xas_load() will never expand the tree. | ||
217 | * | ||
218 | * If the xa_state is set up to operate on a multi-index entry, xas_load() | ||
219 | * may return %NULL or an internal entry, even if there are entries | ||
220 | * present within the range specified by @xas. | ||
221 | * | ||
222 | * Context: Any context. The caller should hold the xa_lock or the RCU lock. | ||
223 | * Return: Usually an entry in the XArray, but see description for exceptions. | ||
224 | */ | ||
225 | void *xas_load(struct xa_state *xas) | ||
226 | { | ||
227 | void *entry = xas_start(xas); | ||
228 | |||
229 | while (xa_is_node(entry)) { | ||
230 | struct xa_node *node = xa_to_node(entry); | ||
231 | |||
232 | if (xas->xa_shift > node->shift) | ||
233 | break; | ||
234 | entry = xas_descend(xas, node); | ||
235 | } | ||
236 | return entry; | ||
237 | } | ||
238 | EXPORT_SYMBOL_GPL(xas_load); | ||
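/*
 * Usage sketch (editorial; 'array' and 'index' are hypothetical):
 *
 *	XA_STATE(xas, &array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	rcu_read_unlock();
 *
 * See xa_load() below for the canonical wrapper, including retry handling.
 */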
239 | |||
240 | /* Move the radix tree node cache here */ | ||
241 | extern struct kmem_cache *radix_tree_node_cachep; | ||
242 | extern void radix_tree_node_rcu_free(struct rcu_head *head); | ||
243 | |||
244 | #define XA_RCU_FREE ((struct xarray *)1) | ||
245 | |||
246 | static void xa_node_free(struct xa_node *node) | ||
247 | { | ||
248 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | ||
249 | node->array = XA_RCU_FREE; | ||
250 | call_rcu(&node->rcu_head, radix_tree_node_rcu_free); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * xas_destroy() - Free any resources allocated during the XArray operation. | ||
255 | * @xas: XArray operation state. | ||
256 | * | ||
257 | * This function is internal-only; it frees the node cached in @xas->xa_alloc. | ||
258 | */ | ||
259 | static void xas_destroy(struct xa_state *xas) | ||
260 | { | ||
261 | struct xa_node *node = xas->xa_alloc; | ||
262 | |||
263 | if (!node) | ||
264 | return; | ||
265 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | ||
266 | kmem_cache_free(radix_tree_node_cachep, node); | ||
267 | xas->xa_alloc = NULL; | ||
268 | } | ||
269 | |||
270 | /** | ||
271 | * xas_nomem() - Allocate memory if needed. | ||
272 | * @xas: XArray operation state. | ||
273 | * @gfp: Memory allocation flags. | ||
274 | * | ||
275 | * If we need to add new nodes to the XArray, we try to allocate memory | ||
276 | * with GFP_NOWAIT while holding the lock, which will usually succeed. | ||
277 | * If it fails, @xas is flagged as needing memory to continue. The caller | ||
278 | * should drop the lock and call xas_nomem(). If xas_nomem() succeeds, | ||
279 | * the caller should retry the operation. | ||
280 | * | ||
281 | * Forward progress is guaranteed as one node is allocated here and | ||
282 | * stored in the xa_state where it will be found by xas_alloc(). More | ||
283 | * nodes will likely be found in the slab allocator, but we do not tie | ||
284 | * them up here. | ||
285 | * | ||
286 | * Return: true if memory was needed, and was successfully allocated. | ||
287 | */ | ||
288 | bool xas_nomem(struct xa_state *xas, gfp_t gfp) | ||
289 | { | ||
290 | if (xas->xa_node != XA_ERROR(-ENOMEM)) { | ||
291 | xas_destroy(xas); | ||
292 | return false; | ||
293 | } | ||
294 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | ||
295 | if (!xas->xa_alloc) | ||
296 | return false; | ||
297 | XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); | ||
298 | xas->xa_node = XAS_RESTART; | ||
299 | return true; | ||
300 | } | ||
301 | EXPORT_SYMBOL_GPL(xas_nomem); | ||
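/*
 * The pattern described above looks like this in a caller (editorial
 * sketch; see xa_store() below for the real thing):
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, entry);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */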
302 | |||
303 | /* | ||
304 | * __xas_nomem() - Drop locks and allocate memory if needed. | ||
305 | * @xas: XArray operation state. | ||
306 | * @gfp: Memory allocation flags. | ||
307 | * | ||
308 | * Internal variant of xas_nomem(). | ||
309 | * | ||
310 | * Return: true if memory was needed, and was successfully allocated. | ||
311 | */ | ||
312 | static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) | ||
313 | __must_hold(xas->xa->xa_lock) | ||
314 | { | ||
315 | unsigned int lock_type = xa_lock_type(xas->xa); | ||
316 | |||
317 | if (xas->xa_node != XA_ERROR(-ENOMEM)) { | ||
318 | xas_destroy(xas); | ||
319 | return false; | ||
320 | } | ||
321 | if (gfpflags_allow_blocking(gfp)) { | ||
322 | xas_unlock_type(xas, lock_type); | ||
323 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | ||
324 | xas_lock_type(xas, lock_type); | ||
325 | } else { | ||
326 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | ||
327 | } | ||
328 | if (!xas->xa_alloc) | ||
329 | return false; | ||
330 | XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); | ||
331 | xas->xa_node = XAS_RESTART; | ||
332 | return true; | ||
333 | } | ||
334 | |||
335 | static void xas_update(struct xa_state *xas, struct xa_node *node) | ||
336 | { | ||
337 | if (xas->xa_update) | ||
338 | xas->xa_update(node); | ||
339 | else | ||
340 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | ||
341 | } | ||
342 | |||
343 | static void *xas_alloc(struct xa_state *xas, unsigned int shift) | ||
344 | { | ||
345 | struct xa_node *parent = xas->xa_node; | ||
346 | struct xa_node *node = xas->xa_alloc; | ||
347 | |||
348 | if (xas_invalid(xas)) | ||
349 | return NULL; | ||
350 | |||
351 | if (node) { | ||
352 | xas->xa_alloc = NULL; | ||
353 | } else { | ||
354 | node = kmem_cache_alloc(radix_tree_node_cachep, | ||
355 | GFP_NOWAIT | __GFP_NOWARN); | ||
356 | if (!node) { | ||
357 | xas_set_err(xas, -ENOMEM); | ||
358 | return NULL; | ||
359 | } | ||
360 | } | ||
361 | |||
362 | if (parent) { | ||
363 | node->offset = xas->xa_offset; | ||
364 | parent->count++; | ||
365 | XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE); | ||
366 | xas_update(xas, parent); | ||
367 | } | ||
368 | XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); | ||
369 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | ||
370 | node->shift = shift; | ||
371 | node->count = 0; | ||
372 | node->nr_values = 0; | ||
373 | RCU_INIT_POINTER(node->parent, xas->xa_node); | ||
374 | node->array = xas->xa; | ||
375 | |||
376 | return node; | ||
377 | } | ||
378 | |||
379 | #ifdef CONFIG_XARRAY_MULTI | ||
380 | /* Returns the number of indices covered by a given xa_state */ | ||
381 | static unsigned long xas_size(const struct xa_state *xas) | ||
382 | { | ||
383 | return (xas->xa_sibs + 1UL) << xas->xa_shift; | ||
384 | } | ||
385 | #endif | ||
386 | |||
387 | /* | ||
388 | * Use this to calculate the maximum index that will need to be created | ||
389 | * in order to add the entry described by @xas. Because we cannot store a | ||
390 | * multiple-index entry at index 0, the calculation is a little more complex | ||
391 | * than you might expect. | ||
392 | */ | ||
393 | static unsigned long xas_max(struct xa_state *xas) | ||
394 | { | ||
395 | unsigned long max = xas->xa_index; | ||
396 | |||
397 | #ifdef CONFIG_XARRAY_MULTI | ||
398 | if (xas->xa_shift || xas->xa_sibs) { | ||
399 | unsigned long mask = xas_size(xas) - 1; | ||
400 | max |= mask; | ||
401 | if (mask == max) | ||
402 | max++; | ||
403 | } | ||
404 | #endif | ||
405 | |||
406 | return max; | ||
407 | } | ||
408 | |||
409 | /* The maximum index that can be contained in the array without expanding it */ | ||
410 | static unsigned long max_index(void *entry) | ||
411 | { | ||
412 | if (!xa_is_node(entry)) | ||
413 | return 0; | ||
414 | return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; | ||
415 | } | ||
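/*
 * Editorial example, assuming XA_CHUNK_SIZE == 64: a head node with
 * shift 0 covers indices 0-63; with shift 6 it covers 0-4095
 * ((64 << 6) - 1).
 */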
416 | |||
417 | static void xas_shrink(struct xa_state *xas) | ||
418 | { | ||
419 | struct xarray *xa = xas->xa; | ||
420 | struct xa_node *node = xas->xa_node; | ||
421 | |||
422 | for (;;) { | ||
423 | void *entry; | ||
424 | |||
425 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | ||
426 | if (node->count != 1) | ||
427 | break; | ||
428 | entry = xa_entry_locked(xa, node, 0); | ||
429 | if (!entry) | ||
430 | break; | ||
431 | if (!xa_is_node(entry) && node->shift) | ||
432 | break; | ||
433 | xas->xa_node = XAS_BOUNDS; | ||
434 | |||
435 | RCU_INIT_POINTER(xa->xa_head, entry); | ||
436 | if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK)) | ||
437 | xa_mark_clear(xa, XA_FREE_MARK); | ||
438 | |||
439 | node->count = 0; | ||
440 | node->nr_values = 0; | ||
441 | if (!xa_is_node(entry)) | ||
442 | RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY); | ||
443 | xas_update(xas, node); | ||
444 | xa_node_free(node); | ||
445 | if (!xa_is_node(entry)) | ||
446 | break; | ||
447 | node = xa_to_node(entry); | ||
448 | node->parent = NULL; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * xas_delete_node() - Attempt to delete an xa_node | ||
454 | * @xas: Array operation state. | ||
455 | * | ||
456 | * Attempts to delete the @xas->xa_node. This does nothing if the node | ||
457 | * still contains entries (its count is non-zero). | ||
458 | */ | ||
459 | static void xas_delete_node(struct xa_state *xas) | ||
460 | { | ||
461 | struct xa_node *node = xas->xa_node; | ||
462 | |||
463 | for (;;) { | ||
464 | struct xa_node *parent; | ||
465 | |||
466 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | ||
467 | if (node->count) | ||
468 | break; | ||
469 | |||
470 | parent = xa_parent_locked(xas->xa, node); | ||
471 | xas->xa_node = parent; | ||
472 | xas->xa_offset = node->offset; | ||
473 | xa_node_free(node); | ||
474 | |||
475 | if (!parent) { | ||
476 | xas->xa->xa_head = NULL; | ||
477 | xas->xa_node = XAS_BOUNDS; | ||
478 | return; | ||
479 | } | ||
480 | |||
481 | parent->slots[xas->xa_offset] = NULL; | ||
482 | parent->count--; | ||
483 | XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE); | ||
484 | node = parent; | ||
485 | xas_update(xas, node); | ||
486 | } | ||
487 | |||
488 | if (!node->parent) | ||
489 | xas_shrink(xas); | ||
490 | } | ||
491 | |||
492 | /** | ||
493 | * xas_free_nodes() - Free this node and all nodes that it references | ||
494 | * @xas: Array operation state. | ||
495 | * @top: Node to free | ||
496 | * | ||
497 | * This node has been removed from the tree. We must now free it and all | ||
498 | * of its subnodes. There may be RCU walkers with references into the tree, | ||
499 | * so we must replace all entries with retry markers. | ||
500 | */ | ||
501 | static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) | ||
502 | { | ||
503 | unsigned int offset = 0; | ||
504 | struct xa_node *node = top; | ||
505 | |||
506 | for (;;) { | ||
507 | void *entry = xa_entry_locked(xas->xa, node, offset); | ||
508 | |||
509 | if (xa_is_node(entry)) { | ||
510 | node = xa_to_node(entry); | ||
511 | offset = 0; | ||
512 | continue; | ||
513 | } | ||
514 | if (entry) | ||
515 | RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY); | ||
516 | offset++; | ||
517 | while (offset == XA_CHUNK_SIZE) { | ||
518 | struct xa_node *parent; | ||
519 | |||
520 | parent = xa_parent_locked(xas->xa, node); | ||
521 | offset = node->offset + 1; | ||
522 | node->count = 0; | ||
523 | node->nr_values = 0; | ||
524 | xas_update(xas, node); | ||
525 | xa_node_free(node); | ||
526 | if (node == top) | ||
527 | return; | ||
528 | node = parent; | ||
529 | } | ||
530 | } | ||
531 | } | ||
532 | |||
533 | /* | ||
534 | * xas_expand adds nodes to the head of the tree until it has reached | ||
535 | * sufficient height to be able to contain @xas->xa_index | ||
536 | */ | ||
537 | static int xas_expand(struct xa_state *xas, void *head) | ||
538 | { | ||
539 | struct xarray *xa = xas->xa; | ||
540 | struct xa_node *node = NULL; | ||
541 | unsigned int shift = 0; | ||
542 | unsigned long max = xas_max(xas); | ||
543 | |||
544 | if (!head) { | ||
545 | if (max == 0) | ||
546 | return 0; | ||
547 | while ((max >> shift) >= XA_CHUNK_SIZE) | ||
548 | shift += XA_CHUNK_SHIFT; | ||
549 | return shift + XA_CHUNK_SHIFT; | ||
550 | } else if (xa_is_node(head)) { | ||
551 | node = xa_to_node(head); | ||
552 | shift = node->shift + XA_CHUNK_SHIFT; | ||
553 | } | ||
554 | xas->xa_node = NULL; | ||
555 | |||
556 | while (max > max_index(head)) { | ||
557 | xa_mark_t mark = 0; | ||
558 | |||
559 | XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); | ||
560 | node = xas_alloc(xas, shift); | ||
561 | if (!node) | ||
562 | return -ENOMEM; | ||
563 | |||
564 | node->count = 1; | ||
565 | if (xa_is_value(head)) | ||
566 | node->nr_values = 1; | ||
567 | RCU_INIT_POINTER(node->slots[0], head); | ||
568 | |||
569 | /* Propagate the aggregated mark info to the new child */ | ||
570 | for (;;) { | ||
571 | if (xa_track_free(xa) && mark == XA_FREE_MARK) { | ||
572 | node_mark_all(node, XA_FREE_MARK); | ||
573 | if (!xa_marked(xa, XA_FREE_MARK)) { | ||
574 | node_clear_mark(node, 0, XA_FREE_MARK); | ||
575 | xa_mark_set(xa, XA_FREE_MARK); | ||
576 | } | ||
577 | } else if (xa_marked(xa, mark)) { | ||
578 | node_set_mark(node, 0, mark); | ||
579 | } | ||
580 | if (mark == XA_MARK_MAX) | ||
581 | break; | ||
582 | mark_inc(mark); | ||
583 | } | ||
584 | |||
585 | /* | ||
586 | * Now that the new node is fully initialised, we can add | ||
587 | * it to the tree | ||
588 | */ | ||
589 | if (xa_is_node(head)) { | ||
590 | xa_to_node(head)->offset = 0; | ||
591 | rcu_assign_pointer(xa_to_node(head)->parent, node); | ||
592 | } | ||
593 | head = xa_mk_node(node); | ||
594 | rcu_assign_pointer(xa->xa_head, head); | ||
595 | xas_update(xas, node); | ||
596 | |||
597 | shift += XA_CHUNK_SHIFT; | ||
598 | } | ||
599 | |||
600 | xas->xa_node = node; | ||
601 | return shift; | ||
602 | } | ||
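/*
 * Editorial example, assuming XA_CHUNK_SHIFT == 6: storing at index 4096
 * above a head node of shift 0 adds two new levels (shift 6, then
 * shift 12), since a shift-6 head only covers indices up to 4095.
 */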
603 | |||
604 | /* | ||
605 | * xas_create() - Create a slot to store an entry in. | ||
606 | * @xas: XArray operation state. | ||
607 | * | ||
608 | * Most users will not need to call this function directly, as it is called | ||
609 | * by xas_store(). It is useful for doing conditional store operations | ||
610 | * (see the xa_cmpxchg() implementation for an example). | ||
611 | * | ||
612 | * Return: If the slot already existed, returns the contents of this slot. | ||
613 | * If the slot was newly created, returns NULL. If it failed to create the | ||
614 | * slot, returns NULL and indicates the error in @xas. | ||
615 | */ | ||
616 | static void *xas_create(struct xa_state *xas) | ||
617 | { | ||
618 | struct xarray *xa = xas->xa; | ||
619 | void *entry; | ||
620 | void __rcu **slot; | ||
621 | struct xa_node *node = xas->xa_node; | ||
622 | int shift; | ||
623 | unsigned int order = xas->xa_shift; | ||
624 | |||
625 | if (xas_top(node)) { | ||
626 | entry = xa_head_locked(xa); | ||
627 | xas->xa_node = NULL; | ||
628 | shift = xas_expand(xas, entry); | ||
629 | if (shift < 0) | ||
630 | return NULL; | ||
631 | entry = xa_head_locked(xa); | ||
632 | slot = &xa->xa_head; | ||
633 | } else if (xas_error(xas)) { | ||
634 | return NULL; | ||
635 | } else if (node) { | ||
636 | unsigned int offset = xas->xa_offset; | ||
637 | |||
638 | shift = node->shift; | ||
639 | entry = xa_entry_locked(xa, node, offset); | ||
640 | slot = &node->slots[offset]; | ||
641 | } else { | ||
642 | shift = 0; | ||
643 | entry = xa_head_locked(xa); | ||
644 | slot = &xa->xa_head; | ||
645 | } | ||
646 | |||
647 | while (shift > order) { | ||
648 | shift -= XA_CHUNK_SHIFT; | ||
649 | if (!entry) { | ||
650 | node = xas_alloc(xas, shift); | ||
651 | if (!node) | ||
652 | break; | ||
653 | if (xa_track_free(xa)) | ||
654 | node_mark_all(node, XA_FREE_MARK); | ||
655 | rcu_assign_pointer(*slot, xa_mk_node(node)); | ||
656 | } else if (xa_is_node(entry)) { | ||
657 | node = xa_to_node(entry); | ||
658 | } else { | ||
659 | break; | ||
660 | } | ||
661 | entry = xas_descend(xas, node); | ||
662 | slot = &node->slots[xas->xa_offset]; | ||
663 | } | ||
664 | |||
665 | return entry; | ||
666 | } | ||
667 | |||
668 | /** | ||
669 | * xas_create_range() - Ensure that stores to this range will succeed | ||
670 | * @xas: XArray operation state. | ||
671 | * | ||
672 | * Creates all of the slots in the range covered by @xas. Sets @xas to | ||
673 | * create single-index entries and positions it at the beginning of the | ||
674 | * range. This is for the benefit of users which have not yet been | ||
675 | * converted to use multi-index entries. | ||
676 | */ | ||
677 | void xas_create_range(struct xa_state *xas) | ||
678 | { | ||
679 | unsigned long index = xas->xa_index; | ||
680 | unsigned char shift = xas->xa_shift; | ||
681 | unsigned char sibs = xas->xa_sibs; | ||
682 | |||
683 | xas->xa_index |= ((sibs + 1) << shift) - 1; | ||
684 | if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift) | ||
685 | xas->xa_offset |= sibs; | ||
686 | xas->xa_shift = 0; | ||
687 | xas->xa_sibs = 0; | ||
688 | |||
689 | for (;;) { | ||
690 | xas_create(xas); | ||
691 | if (xas_error(xas)) | ||
692 | goto restore; | ||
693 | if (xas->xa_index <= (index | XA_CHUNK_MASK)) | ||
694 | goto success; | ||
695 | xas->xa_index -= XA_CHUNK_SIZE; | ||
696 | |||
697 | for (;;) { | ||
698 | struct xa_node *node = xas->xa_node; | ||
699 | xas->xa_node = xa_parent_locked(xas->xa, node); | ||
700 | xas->xa_offset = node->offset - 1; | ||
701 | if (node->offset != 0) | ||
702 | break; | ||
703 | } | ||
704 | } | ||
705 | |||
706 | restore: | ||
707 | xas->xa_shift = shift; | ||
708 | xas->xa_sibs = sibs; | ||
709 | xas->xa_index = index; | ||
710 | return; | ||
711 | success: | ||
712 | xas->xa_index = index; | ||
713 | if (xas->xa_node) | ||
714 | xas_set_offset(xas); | ||
715 | } | ||
716 | EXPORT_SYMBOL_GPL(xas_create_range); | ||
717 | |||
718 | static void update_node(struct xa_state *xas, struct xa_node *node, | ||
719 | int count, int values) | ||
720 | { | ||
721 | if (!node || (!count && !values)) | ||
722 | return; | ||
723 | |||
724 | node->count += count; | ||
725 | node->nr_values += values; | ||
726 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | ||
727 | XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE); | ||
728 | xas_update(xas, node); | ||
729 | if (count < 0) | ||
730 | xas_delete_node(xas); | ||
731 | } | ||
732 | |||
733 | /** | ||
734 | * xas_store() - Store this entry in the XArray. | ||
735 | * @xas: XArray operation state. | ||
736 | * @entry: New entry. | ||
737 | * | ||
738 | * If @xas is operating on a multi-index entry, the entry returned by this | ||
739 | * function is essentially meaningless (it may be an internal entry or it | ||
740 | * may be %NULL, even if there are non-NULL entries at some of the indices | ||
741 | * covered by the range). This is not a problem for any current users, | ||
742 | * and can be changed if needed. | ||
743 | * | ||
744 | * Return: The old entry at this index. | ||
745 | */ | ||
746 | void *xas_store(struct xa_state *xas, void *entry) | ||
747 | { | ||
748 | struct xa_node *node; | ||
749 | void __rcu **slot = &xas->xa->xa_head; | ||
750 | unsigned int offset, max; | ||
751 | int count = 0; | ||
752 | int values = 0; | ||
753 | void *first, *next; | ||
754 | bool value = xa_is_value(entry); | ||
755 | |||
756 | if (entry) | ||
757 | first = xas_create(xas); | ||
758 | else | ||
759 | first = xas_load(xas); | ||
760 | |||
761 | if (xas_invalid(xas)) | ||
762 | return first; | ||
763 | node = xas->xa_node; | ||
764 | if (node && (xas->xa_shift < node->shift)) | ||
765 | xas->xa_sibs = 0; | ||
766 | if ((first == entry) && !xas->xa_sibs) | ||
767 | return first; | ||
768 | |||
769 | next = first; | ||
770 | offset = xas->xa_offset; | ||
771 | max = xas->xa_offset + xas->xa_sibs; | ||
772 | if (node) { | ||
773 | slot = &node->slots[offset]; | ||
774 | if (xas->xa_sibs) | ||
775 | xas_squash_marks(xas); | ||
776 | } | ||
777 | if (!entry) | ||
778 | xas_init_marks(xas); | ||
779 | |||
780 | for (;;) { | ||
781 | /* | ||
782 | * Must clear the marks before setting the entry to NULL, | ||
783 | * otherwise xas_for_each_marked may find a NULL entry and | ||
784 | * stop early. rcu_assign_pointer contains a release barrier | ||
785 | * so the mark clearing will appear to happen before the | ||
786 | * entry is set to NULL. | ||
787 | */ | ||
788 | rcu_assign_pointer(*slot, entry); | ||
789 | if (xa_is_node(next)) | ||
790 | xas_free_nodes(xas, xa_to_node(next)); | ||
791 | if (!node) | ||
792 | break; | ||
793 | count += !next - !entry; | ||
794 | values += !xa_is_value(first) - !value; | ||
795 | if (entry) { | ||
796 | if (offset == max) | ||
797 | break; | ||
798 | if (!xa_is_sibling(entry)) | ||
799 | entry = xa_mk_sibling(xas->xa_offset); | ||
800 | } else { | ||
801 | if (offset == XA_CHUNK_MASK) | ||
802 | break; | ||
803 | } | ||
804 | next = xa_entry_locked(xas->xa, node, ++offset); | ||
805 | if (!xa_is_sibling(next)) { | ||
806 | if (!entry && (offset > max)) | ||
807 | break; | ||
808 | first = next; | ||
809 | } | ||
810 | slot++; | ||
811 | } | ||
812 | |||
813 | update_node(xas, node, count, values); | ||
814 | return first; | ||
815 | } | ||
816 | EXPORT_SYMBOL_GPL(xas_store); | ||
817 | |||
818 | /** | ||
819 | * xas_get_mark() - Returns the state of this mark. | ||
820 | * @xas: XArray operation state. | ||
821 | * @mark: Mark number. | ||
822 | * | ||
823 | * Return: true if the mark is set, false if the mark is clear or @xas | ||
824 | * is in an error state. | ||
825 | */ | ||
826 | bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark) | ||
827 | { | ||
828 | if (xas_invalid(xas)) | ||
829 | return false; | ||
830 | if (!xas->xa_node) | ||
831 | return xa_marked(xas->xa, mark); | ||
832 | return node_get_mark(xas->xa_node, xas->xa_offset, mark); | ||
833 | } | ||
834 | EXPORT_SYMBOL_GPL(xas_get_mark); | ||
835 | |||
836 | /** | ||
837 | * xas_set_mark() - Sets the mark on this entry and its parents. | ||
838 | * @xas: XArray operation state. | ||
839 | * @mark: Mark number. | ||
840 | * | ||
841 | * Sets the specified mark on this entry, and walks up the tree setting it | ||
842 | * on all the ancestor entries. Does nothing if @xas has not been walked to | ||
843 | * an entry, or is in an error state. | ||
844 | */ | ||
845 | void xas_set_mark(const struct xa_state *xas, xa_mark_t mark) | ||
846 | { | ||
847 | struct xa_node *node = xas->xa_node; | ||
848 | unsigned int offset = xas->xa_offset; | ||
849 | |||
850 | if (xas_invalid(xas)) | ||
851 | return; | ||
852 | |||
853 | while (node) { | ||
854 | if (node_set_mark(node, offset, mark)) | ||
855 | return; | ||
856 | offset = node->offset; | ||
857 | node = xa_parent_locked(xas->xa, node); | ||
858 | } | ||
859 | |||
860 | if (!xa_marked(xas->xa, mark)) | ||
861 | xa_mark_set(xas->xa, mark); | ||
862 | } | ||
863 | EXPORT_SYMBOL_GPL(xas_set_mark); | ||
864 | |||
865 | /** | ||
866 | * xas_clear_mark() - Clears the mark on this entry and its parents. | ||
867 | * @xas: XArray operation state. | ||
868 | * @mark: Mark number. | ||
869 | * | ||
870 | * Clears the specified mark on this entry, and walks back to the head | ||
871 | * attempting to clear it on all the ancestor entries. Does nothing if | ||
872 | * @xas has not been walked to an entry, or is in an error state. | ||
873 | */ | ||
874 | void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark) | ||
875 | { | ||
876 | struct xa_node *node = xas->xa_node; | ||
877 | unsigned int offset = xas->xa_offset; | ||
878 | |||
879 | if (xas_invalid(xas)) | ||
880 | return; | ||
881 | |||
882 | while (node) { | ||
883 | if (!node_clear_mark(node, offset, mark)) | ||
884 | return; | ||
885 | if (node_any_mark(node, mark)) | ||
886 | return; | ||
887 | |||
888 | offset = node->offset; | ||
889 | node = xa_parent_locked(xas->xa, node); | ||
890 | } | ||
891 | |||
892 | if (xa_marked(xas->xa, mark)) | ||
893 | xa_mark_clear(xas->xa, mark); | ||
894 | } | ||
895 | EXPORT_SYMBOL_GPL(xas_clear_mark); | ||
896 | |||
897 | /** | ||
898 | * xas_init_marks() - Initialise all marks for the entry | ||
899 | * @xas: Array operations state. | ||
900 | * | ||
901 | * Initialise all marks for the entry specified by @xas. If we're tracking | ||
902 | * free entries with a mark, we need to set it on all entries. All other | ||
903 | * marks are cleared. | ||
904 | * | ||
905 | * This implementation is not as efficient as it could be; we may walk | ||
906 | * up the tree multiple times. | ||
907 | */ | ||
908 | void xas_init_marks(const struct xa_state *xas) | ||
909 | { | ||
910 | xa_mark_t mark = 0; | ||
911 | |||
912 | for (;;) { | ||
913 | if (xa_track_free(xas->xa) && mark == XA_FREE_MARK) | ||
914 | xas_set_mark(xas, mark); | ||
915 | else | ||
916 | xas_clear_mark(xas, mark); | ||
917 | if (mark == XA_MARK_MAX) | ||
918 | break; | ||
919 | mark_inc(mark); | ||
920 | } | ||
921 | } | ||
922 | EXPORT_SYMBOL_GPL(xas_init_marks); | ||
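/*
 * Editorial sketch of mark usage ('array' and 'index' are hypothetical):
 *
 *	XA_STATE(xas, &array, index);
 *
 *	xas_lock(&xas);
 *	xas_load(&xas);
 *	xas_set_mark(&xas, XA_MARK_0);
 *	xas_unlock(&xas);
 *
 * xa_marked(&array, XA_MARK_0) is then true until the mark is cleared.
 */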
923 | |||
924 | /** | ||
925 | * xas_pause() - Pause a walk to drop a lock. | ||
926 | * @xas: XArray operation state. | ||
927 | * | ||
928 | * Some users need to pause a walk and drop the lock they're holding in | ||
929 | * order to yield to a higher priority thread or carry out an operation | ||
930 | * on an entry. Those users should call this function before they drop | ||
931 | * the lock. It resets the @xas to be suitable for the next iteration | ||
932 | * of the loop after the user has reacquired the lock. If most entries | ||
933 | * found during a walk require you to call xas_pause(), the xa_for_each() | ||
934 | * iterator may be more appropriate. | ||
935 | * | ||
936 | * Note that xas_pause() only works for forward iteration. If a user needs | ||
937 | * to pause a reverse iteration, we will need a xas_pause_rev(). | ||
938 | */ | ||
939 | void xas_pause(struct xa_state *xas) | ||
940 | { | ||
941 | struct xa_node *node = xas->xa_node; | ||
942 | |||
943 | if (xas_invalid(xas)) | ||
944 | return; | ||
945 | |||
946 | if (node) { | ||
947 | unsigned int offset = xas->xa_offset; | ||
948 | while (++offset < XA_CHUNK_SIZE) { | ||
949 | if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) | ||
950 | break; | ||
951 | } | ||
952 | xas->xa_index += (offset - xas->xa_offset) << node->shift; | ||
953 | } else { | ||
954 | xas->xa_index++; | ||
955 | } | ||
956 | xas->xa_node = XAS_RESTART; | ||
957 | } | ||
958 | EXPORT_SYMBOL_GPL(xas_pause); | ||
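/*
 * Editorial sketch of the pattern described above ('array' is
 * hypothetical):
 *
 *	XA_STATE(xas, &array, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */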
959 | |||
960 | /* | ||
961 | * __xas_prev() - Find the previous entry in the XArray. | ||
962 | * @xas: XArray operation state. | ||
963 | * | ||
964 | * Helper function for xas_prev() which handles all the complex cases | ||
965 | * out of line. | ||
966 | */ | ||
967 | void *__xas_prev(struct xa_state *xas) | ||
968 | { | ||
969 | void *entry; | ||
970 | |||
971 | if (!xas_frozen(xas->xa_node)) | ||
972 | xas->xa_index--; | ||
973 | if (xas_not_node(xas->xa_node)) | ||
974 | return xas_load(xas); | ||
975 | |||
976 | if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) | ||
977 | xas->xa_offset--; | ||
978 | |||
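	/*
	 * Editorial note: xa_offset is an unsigned char, so stepping back
	 * from offset 0 wraps around to 255.
	 */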
979 | while (xas->xa_offset == 255) { | ||
980 | xas->xa_offset = xas->xa_node->offset - 1; | ||
981 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | ||
982 | if (!xas->xa_node) | ||
983 | return set_bounds(xas); | ||
984 | } | ||
985 | |||
986 | for (;;) { | ||
987 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | ||
988 | if (!xa_is_node(entry)) | ||
989 | return entry; | ||
990 | |||
991 | xas->xa_node = xa_to_node(entry); | ||
992 | xas_set_offset(xas); | ||
993 | } | ||
994 | } | ||
995 | EXPORT_SYMBOL_GPL(__xas_prev); | ||
996 | |||
997 | /* | ||
998 | * __xas_next() - Find the next entry in the XArray. | ||
999 | * @xas: XArray operation state. | ||
1000 | * | ||
1001 | * Helper function for xas_next() which handles all the complex cases | ||
1002 | * out of line. | ||
1003 | */ | ||
1004 | void *__xas_next(struct xa_state *xas) | ||
1005 | { | ||
1006 | void *entry; | ||
1007 | |||
1008 | if (!xas_frozen(xas->xa_node)) | ||
1009 | xas->xa_index++; | ||
1010 | if (xas_not_node(xas->xa_node)) | ||
1011 | return xas_load(xas); | ||
1012 | |||
1013 | if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) | ||
1014 | xas->xa_offset++; | ||
1015 | |||
1016 | while (xas->xa_offset == XA_CHUNK_SIZE) { | ||
1017 | xas->xa_offset = xas->xa_node->offset + 1; | ||
1018 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | ||
1019 | if (!xas->xa_node) | ||
1020 | return set_bounds(xas); | ||
1021 | } | ||
1022 | |||
1023 | for (;;) { | ||
1024 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | ||
1025 | if (!xa_is_node(entry)) | ||
1026 | return entry; | ||
1027 | |||
1028 | xas->xa_node = xa_to_node(entry); | ||
1029 | xas_set_offset(xas); | ||
1030 | } | ||
1031 | } | ||
1032 | EXPORT_SYMBOL_GPL(__xas_next); | ||
1033 | |||
1034 | /** | ||
1035 | * xas_find() - Find the next present entry in the XArray. | ||
1036 | * @xas: XArray operation state. | ||
1037 | * @max: Highest index to return. | ||
1038 | * | ||
1039 | * If the @xas has not yet been walked to an entry, return the entry | ||
1040 | * which has an index >= xas.xa_index. If it has been walked, the entry | ||
1041 | * currently being pointed at has been processed, and so we move to the | ||
1042 | * next entry. | ||
1043 | * | ||
1044 | * If no entry is found and the array is smaller than @max, the iterator | ||
1045 | * is set to the smallest index not yet in the array. This allows @xas | ||
1046 | * to be immediately passed to xas_store(). | ||
1047 | * | ||
1048 | * Return: The entry, if found, otherwise %NULL. | ||
1049 | */ | ||
1050 | void *xas_find(struct xa_state *xas, unsigned long max) | ||
1051 | { | ||
1052 | void *entry; | ||
1053 | |||
1054 | if (xas_error(xas)) | ||
1055 | return NULL; | ||
1056 | |||
1057 | if (!xas->xa_node) { | ||
1058 | xas->xa_index = 1; | ||
1059 | return set_bounds(xas); | ||
1060 | } else if (xas_top(xas->xa_node)) { | ||
1061 | entry = xas_load(xas); | ||
1062 | if (entry || xas_not_node(xas->xa_node)) | ||
1063 | return entry; | ||
1064 | } else if (!xas->xa_node->shift && | ||
1065 | xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) { | ||
1066 | xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1; | ||
1067 | } | ||
1068 | |||
1069 | xas_advance(xas); | ||
1070 | |||
1071 | while (xas->xa_node && (xas->xa_index <= max)) { | ||
1072 | if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { | ||
1073 | xas->xa_offset = xas->xa_node->offset + 1; | ||
1074 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | ||
1075 | continue; | ||
1076 | } | ||
1077 | |||
1078 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | ||
1079 | if (xa_is_node(entry)) { | ||
1080 | xas->xa_node = xa_to_node(entry); | ||
1081 | xas->xa_offset = 0; | ||
1082 | continue; | ||
1083 | } | ||
1084 | if (entry && !xa_is_sibling(entry)) | ||
1085 | return entry; | ||
1086 | |||
1087 | xas_advance(xas); | ||
1088 | } | ||
1089 | |||
1090 | if (!xas->xa_node) | ||
1091 | xas->xa_node = XAS_BOUNDS; | ||
1092 | return NULL; | ||
1093 | } | ||
1094 | EXPORT_SYMBOL_GPL(xas_find); | ||
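/*
 * Editorial sketch ('array' is hypothetical): visiting every present
 * entry with xas_find() directly:
 *
 *	XA_STATE(xas, &array, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_find(&xas, ULONG_MAX);
 *	while (entry) {
 *		... process entry ...
 *		entry = xas_find(&xas, ULONG_MAX);
 *	}
 *	rcu_read_unlock();
 */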
1095 | |||
1096 | /** | ||
1097 | * xas_find_marked() - Find the next marked entry in the XArray. | ||
1098 | * @xas: XArray operation state. | ||
1099 | * @max: Highest index to return. | ||
1100 | * @mark: Mark number to search for. | ||
1101 | * | ||
1102 | * If the @xas has not yet been walked to an entry, return the marked entry | ||
1103 | * which has an index >= xas.xa_index. If it has been walked, the entry | ||
1104 | * currently being pointed at has been processed, and so we return the | ||
1105 | * first marked entry with an index > xas.xa_index. | ||
1106 | * | ||
1107 | * If no marked entry is found and the array is smaller than @max, @xas is | ||
1108 | * set to the bounds state and xas->xa_index is set to the smallest index | ||
1109 | * not yet in the array. This allows @xas to be immediately passed to | ||
1110 | * xas_store(). | ||
1111 | * | ||
1112 | * If no entry is found before @max is reached, @xas is set to the restart | ||
1113 | * state. | ||
1114 | * | ||
1115 | * Return: The entry, if found, otherwise %NULL. | ||
1116 | */ | ||
1117 | void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | ||
1118 | { | ||
1119 | bool advance = true; | ||
1120 | unsigned int offset; | ||
1121 | void *entry; | ||
1122 | |||
1123 | if (xas_error(xas)) | ||
1124 | return NULL; | ||
1125 | |||
1126 | if (!xas->xa_node) { | ||
1127 | xas->xa_index = 1; | ||
1128 | goto out; | ||
1129 | } else if (xas_top(xas->xa_node)) { | ||
1130 | advance = false; | ||
1131 | entry = xa_head(xas->xa); | ||
1132 | xas->xa_node = NULL; | ||
1133 | if (xas->xa_index > max_index(entry)) | ||
1134 | goto bounds; | ||
1135 | if (!xa_is_node(entry)) { | ||
1136 | if (xa_marked(xas->xa, mark)) | ||
1137 | return entry; | ||
1138 | xas->xa_index = 1; | ||
1139 | goto out; | ||
1140 | } | ||
1141 | xas->xa_node = xa_to_node(entry); | ||
1142 | xas->xa_offset = xas->xa_index >> xas->xa_node->shift; | ||
1143 | } | ||
1144 | |||
1145 | while (xas->xa_index <= max) { | ||
1146 | if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { | ||
1147 | xas->xa_offset = xas->xa_node->offset + 1; | ||
1148 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | ||
1149 | if (!xas->xa_node) | ||
1150 | break; | ||
1151 | advance = false; | ||
1152 | continue; | ||
1153 | } | ||
1154 | |||
1155 | if (!advance) { | ||
1156 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | ||
1157 | if (xa_is_sibling(entry)) { | ||
1158 | xas->xa_offset = xa_to_sibling(entry); | ||
1159 | xas_move_index(xas, xas->xa_offset); | ||
1160 | } | ||
1161 | } | ||
1162 | |||
1163 | offset = xas_find_chunk(xas, advance, mark); | ||
1164 | if (offset > xas->xa_offset) { | ||
1165 | advance = false; | ||
1166 | xas_move_index(xas, offset); | ||
1167 | /* Mind the wrap */ | ||
1168 | if ((xas->xa_index - 1) >= max) | ||
1169 | goto max; | ||
1170 | xas->xa_offset = offset; | ||
1171 | if (offset == XA_CHUNK_SIZE) | ||
1172 | continue; | ||
1173 | } | ||
1174 | |||
1175 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | ||
1176 | if (!xa_is_node(entry)) | ||
1177 | return entry; | ||
1178 | xas->xa_node = xa_to_node(entry); | ||
1179 | xas_set_offset(xas); | ||
1180 | } | ||
1181 | |||
1182 | out: | ||
1183 | if (!max) | ||
1184 | goto max; | ||
1185 | bounds: | ||
1186 | xas->xa_node = XAS_BOUNDS; | ||
1187 | return NULL; | ||
1188 | max: | ||
1189 | xas->xa_node = XAS_RESTART; | ||
1190 | return NULL; | ||
1191 | } | ||
1192 | EXPORT_SYMBOL_GPL(xas_find_marked); | ||
1193 | |||
1194 | /** | ||
1195 | * xas_find_conflict() - Find the next present entry in a range. | ||
1196 | * @xas: XArray operation state. | ||
1197 | * | ||
1198 | * The @xas describes both a range and a position within that range. | ||
1199 | * | ||
1200 | * Context: Any context. Expects xa_lock to be held. | ||
1201 | * Return: The next entry in the range covered by @xas or %NULL. | ||
1202 | */ | ||
1203 | void *xas_find_conflict(struct xa_state *xas) | ||
1204 | { | ||
1205 | void *curr; | ||
1206 | |||
1207 | if (xas_error(xas)) | ||
1208 | return NULL; | ||
1209 | |||
1210 | if (!xas->xa_node) | ||
1211 | return NULL; | ||
1212 | |||
1213 | if (xas_top(xas->xa_node)) { | ||
1214 | curr = xas_start(xas); | ||
1215 | if (!curr) | ||
1216 | return NULL; | ||
1217 | while (xa_is_node(curr)) { | ||
1218 | struct xa_node *node = xa_to_node(curr); | ||
1219 | curr = xas_descend(xas, node); | ||
1220 | } | ||
1221 | if (curr) | ||
1222 | return curr; | ||
1223 | } | ||
1224 | |||
1225 | if (xas->xa_node->shift > xas->xa_shift) | ||
1226 | return NULL; | ||
1227 | |||
1228 | for (;;) { | ||
1229 | if (xas->xa_node->shift == xas->xa_shift) { | ||
1230 | if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs) | ||
1231 | break; | ||
1232 | } else if (xas->xa_offset == XA_CHUNK_MASK) { | ||
1233 | xas->xa_offset = xas->xa_node->offset; | ||
1234 | xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node); | ||
1235 | if (!xas->xa_node) | ||
1236 | break; | ||
1237 | continue; | ||
1238 | } | ||
1239 | curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset); | ||
1240 | if (xa_is_sibling(curr)) | ||
1241 | continue; | ||
1242 | while (xa_is_node(curr)) { | ||
1243 | xas->xa_node = xa_to_node(curr); | ||
1244 | xas->xa_offset = 0; | ||
1245 | curr = xa_entry_locked(xas->xa, xas->xa_node, 0); | ||
1246 | } | ||
1247 | if (curr) | ||
1248 | return curr; | ||
1249 | } | ||
1250 | xas->xa_offset -= xas->xa_sibs; | ||
1251 | return NULL; | ||
1252 | } | ||
1253 | EXPORT_SYMBOL_GPL(xas_find_conflict); | ||
1254 | |||
1255 | /** | ||
1256 | * xa_init_flags() - Initialise an empty XArray with flags. | ||
1257 | * @xa: XArray. | ||
1258 | * @flags: XA_FLAG values. | ||
1259 | * | ||
1260 | * If you need to initialise an XArray with special flags (e.g. you need | ||
1261 | * to take the lock from interrupt context), use this function instead | ||
1262 | * of xa_init(). | ||
1263 | * | ||
1264 | * Context: Any context. | ||
1265 | */ | ||
1266 | void xa_init_flags(struct xarray *xa, gfp_t flags) | ||
1267 | { | ||
1268 | unsigned int lock_type; | ||
1269 | static struct lock_class_key xa_lock_irq; | ||
1270 | static struct lock_class_key xa_lock_bh; | ||
1271 | |||
1272 | spin_lock_init(&xa->xa_lock); | ||
1273 | xa->xa_flags = flags; | ||
1274 | xa->xa_head = NULL; | ||
1275 | |||
1276 | lock_type = xa_lock_type(xa); | ||
1277 | if (lock_type == XA_LOCK_IRQ) | ||
1278 | lockdep_set_class(&xa->xa_lock, &xa_lock_irq); | ||
1279 | else if (lock_type == XA_LOCK_BH) | ||
1280 | lockdep_set_class(&xa->xa_lock, &xa_lock_bh); | ||
1281 | } | ||
1282 | EXPORT_SYMBOL(xa_init_flags); | ||
1283 | |||
1284 | /** | ||
1285 | * xa_load() - Load an entry from an XArray. | ||
1286 | * @xa: XArray. | ||
1287 | * @index: index into array. | ||
1288 | * | ||
1289 | * Context: Any context. Takes and releases the RCU lock. | ||
1290 | * Return: The entry at @index in @xa. | ||
1291 | */ | ||
1292 | void *xa_load(struct xarray *xa, unsigned long index) | ||
1293 | { | ||
1294 | XA_STATE(xas, xa, index); | ||
1295 | void *entry; | ||
1296 | |||
1297 | rcu_read_lock(); | ||
1298 | do { | ||
1299 | entry = xas_load(&xas); | ||
1300 | if (xa_is_zero(entry)) | ||
1301 | entry = NULL; | ||
1302 | } while (xas_retry(&xas, entry)); | ||
1303 | rcu_read_unlock(); | ||
1304 | |||
1305 | return entry; | ||
1306 | } | ||
1307 | EXPORT_SYMBOL(xa_load); | ||
1308 | |||
1309 | static void *xas_result(struct xa_state *xas, void *curr) | ||
1310 | { | ||
1311 | if (xa_is_zero(curr)) | ||
1312 | return NULL; | ||
1313 | XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr)); | ||
1314 | if (xas_error(xas)) | ||
1315 | curr = xas->xa_node; | ||
1316 | return curr; | ||
1317 | } | ||
1318 | |||
1319 | /** | ||
1320 | * __xa_erase() - Erase this entry from the XArray while locked. | ||
1321 | * @xa: XArray. | ||
1322 | * @index: Index into array. | ||
1323 | * | ||
1324 | * If the entry at this index is a multi-index entry then all indices will | ||
1325 | * be erased, and the entry will no longer be a multi-index entry. | ||
1326 | * This function does not drop the xa_lock. | ||
1327 | * | ||
1328 | * Context: Any context. Expects xa_lock to be held on entry. | ||
1330 | * Return: The old entry at this index. | ||
1331 | */ | ||
1332 | void *__xa_erase(struct xarray *xa, unsigned long index) | ||
1333 | { | ||
1334 | XA_STATE(xas, xa, index); | ||
1335 | return xas_result(&xas, xas_store(&xas, NULL)); | ||
1336 | } | ||
1337 | EXPORT_SYMBOL_GPL(__xa_erase); | ||
1338 | |||
1339 | /** | ||
1340 | * xa_store() - Store this entry in the XArray. | ||
1341 | * @xa: XArray. | ||
1342 | * @index: Index into array. | ||
1343 | * @entry: New entry. | ||
1344 | * @gfp: Memory allocation flags. | ||
1345 | * | ||
1346 | * After this function returns, loads from this index will return @entry. | ||
1347 | * Storing into an existing multi-index entry updates the entry of every index. | ||
1348 | * The marks associated with @index are unaffected unless @entry is %NULL. | ||
1349 | * | ||
1350 | * Context: Process context. Takes and releases the xa_lock. May sleep | ||
1351 | * if the @gfp flags permit. | ||
1352 | * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry | ||
1353 | * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation | ||
1354 | * failed. | ||
1355 | */ | ||
1356 | void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) | ||
1357 | { | ||
1358 | XA_STATE(xas, xa, index); | ||
1359 | void *curr; | ||
1360 | |||
1361 | if (WARN_ON_ONCE(xa_is_internal(entry))) | ||
1362 | return XA_ERROR(-EINVAL); | ||
1363 | |||
1364 | do { | ||
1365 | xas_lock(&xas); | ||
1366 | curr = xas_store(&xas, entry); | ||
1367 | if (xa_track_free(xa) && entry) | ||
1368 | xas_clear_mark(&xas, XA_FREE_MARK); | ||
1369 | xas_unlock(&xas); | ||
1370 | } while (xas_nomem(&xas, gfp)); | ||
1371 | |||
1372 | return xas_result(&xas, curr); | ||
1373 | } | ||
1374 | EXPORT_SYMBOL(xa_store); | ||
1375 | |||
1376 | /** | ||
1377 | * __xa_store() - Store this entry in the XArray. | ||
1378 | * @xa: XArray. | ||
1379 | * @index: Index into array. | ||
1380 | * @entry: New entry. | ||
1381 | * @gfp: Memory allocation flags. | ||
1382 | * | ||
1383 | * You must already be holding the xa_lock when calling this function. | ||
1384 | * It will drop the lock if needed to allocate memory, and then reacquire | ||
1385 | * it afterwards. | ||
1386 | * | ||
1387 | * Context: Any context. Expects xa_lock to be held on entry. May | ||
1388 | * release and reacquire xa_lock if @gfp flags permit. | ||
1389 | * Return: The old entry at this index or xa_err() if an error happened. | ||
1390 | */ | ||
1391 | void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) | ||
1392 | { | ||
1393 | XA_STATE(xas, xa, index); | ||
1394 | void *curr; | ||
1395 | |||
1396 | if (WARN_ON_ONCE(xa_is_internal(entry))) | ||
1397 | return XA_ERROR(-EINVAL); | ||
1398 | |||
1399 | do { | ||
1400 | curr = xas_store(&xas, entry); | ||
1401 | if (xa_track_free(xa) && entry) | ||
1402 | xas_clear_mark(&xas, XA_FREE_MARK); | ||
1403 | } while (__xas_nomem(&xas, gfp)); | ||
1404 | |||
1405 | return xas_result(&xas, curr); | ||
1406 | } | ||
1407 | EXPORT_SYMBOL(__xa_store); | ||
1408 | |||
1409 | /** | ||
1410 | * xa_cmpxchg() - Conditionally replace an entry in the XArray. | ||
1411 | * @xa: XArray. | ||
1412 | * @index: Index into array. | ||
1413 | * @old: Old value to test against. | ||
1414 | * @entry: New value to place in array. | ||
1415 | * @gfp: Memory allocation flags. | ||
1416 | * | ||
1417 | * If the entry at @index is the same as @old, replace it with @entry. | ||
1418 | * If the return value is equal to @old, then the exchange was successful. | ||
1419 | * | ||
1420 | * Context: Process context. Takes and releases the xa_lock. May sleep | ||
1421 | * if the @gfp flags permit. | ||
1422 | * Return: The old value at this index or xa_err() if an error happened. | ||
1423 | */ | ||
1424 | void *xa_cmpxchg(struct xarray *xa, unsigned long index, | ||
1425 | void *old, void *entry, gfp_t gfp) | ||
1426 | { | ||
1427 | XA_STATE(xas, xa, index); | ||
1428 | void *curr; | ||
1429 | |||
1430 | if (WARN_ON_ONCE(xa_is_internal(entry))) | ||
1431 | return XA_ERROR(-EINVAL); | ||
1432 | |||
1433 | do { | ||
1434 | xas_lock(&xas); | ||
1435 | curr = xas_load(&xas); | ||
1436 | if (curr == XA_ZERO_ENTRY) | ||
1437 | curr = NULL; | ||
1438 | if (curr == old) { | ||
1439 | xas_store(&xas, entry); | ||
1440 | if (xa_track_free(xa) && entry) | ||
1441 | xas_clear_mark(&xas, XA_FREE_MARK); | ||
1442 | } | ||
1443 | xas_unlock(&xas); | ||
1444 | } while (xas_nomem(&xas, gfp)); | ||
1445 | |||
1446 | return xas_result(&xas, curr); | ||
1447 | } | ||
1448 | EXPORT_SYMBOL(xa_cmpxchg); | ||
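/*
 * Editorial sketch: inserting only if the slot is currently empty
 * ('array', 'index' and 'item' are hypothetical):
 *
 *	void *old = xa_cmpxchg(&array, index, NULL, item, GFP_KERNEL);
 *	if (!old)
 *		... item was stored ...
 *	else if (xa_is_err(old))
 *		... allocation failed ...
 *	else
 *		... another entry was already present ...
 */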
1449 | |||
1450 | /** | ||
1451 | * __xa_cmpxchg() - Conditionally replace an entry in the XArray. | ||
1452 | * @xa: XArray. | ||
1453 | * @index: Index into array. | ||
1454 | * @old: Old value to test against. | ||
1455 | * @entry: New entry. | ||
1456 | * @gfp: Memory allocation flags. | ||
1457 | * | ||
1458 | * You must already be holding the xa_lock when calling this function. | ||
1459 | * It will drop the lock if needed to allocate memory, and then reacquire | ||
1460 | * it afterwards. | ||
1461 | * | ||
1462 | * Context: Any context. Expects xa_lock to be held on entry. May | ||
1463 | * release and reacquire xa_lock if @gfp flags permit. | ||
1464 | * Return: The old entry at this index or xa_err() if an error happened. | ||
1465 | */ | ||
1466 | void *__xa_cmpxchg(struct xarray *xa, unsigned long index, | ||
1467 | void *old, void *entry, gfp_t gfp) | ||
1468 | { | ||
1469 | XA_STATE(xas, xa, index); | ||
1470 | void *curr; | ||
1471 | |||
1472 | if (WARN_ON_ONCE(xa_is_internal(entry))) | ||
1473 | return XA_ERROR(-EINVAL); | ||
1474 | |||
1475 | do { | ||
1476 | curr = xas_load(&xas); | ||
1477 | if (curr == XA_ZERO_ENTRY) | ||
1478 | curr = NULL; | ||
1479 | if (curr == old) { | ||
1480 | xas_store(&xas, entry); | ||
1481 | if (xa_track_free(xa) && entry) | ||
1482 | xas_clear_mark(&xas, XA_FREE_MARK); | ||
1483 | } | ||
1484 | } while (__xas_nomem(&xas, gfp)); | ||
1485 | |||
1486 | return xas_result(&xas, curr); | ||
1487 | } | ||
1488 | EXPORT_SYMBOL(__xa_cmpxchg); | ||
1489 | |||
1490 | /** | ||
1491 | * xa_reserve() - Reserve this index in the XArray. | ||
1492 | * @xa: XArray. | ||
1493 | * @index: Index into array. | ||
1494 | * @gfp: Memory allocation flags. | ||
1495 | * | ||
1496 | * Ensures there is somewhere to store an entry at @index in the array. | ||
1497 | * If there is already something stored at @index, this function does | ||
1498 | * nothing. If there was nothing there, the entry is marked as reserved. | ||
1499 | * Loads from @index will continue to see a %NULL pointer until a | ||
1500 | * subsequent store to @index. | ||
1501 | * | ||
1502 | * If you do not use the entry that you have reserved, call xa_release() | ||
1503 | * or xa_erase() to free any unnecessary memory. | ||
1504 | * | ||
1505 | * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe | ||
1506 | * if specified in XArray flags. May sleep if the @gfp flags permit. | ||
1507 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. | ||
1508 | */ | ||
1509 | int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) | ||
1510 | { | ||
1511 | XA_STATE(xas, xa, index); | ||
1512 | unsigned int lock_type = xa_lock_type(xa); | ||
1513 | void *curr; | ||
1514 | |||
1515 | do { | ||
1516 | xas_lock_type(&xas, lock_type); | ||
1517 | curr = xas_load(&xas); | ||
1518 | if (!curr) | ||
1519 | xas_store(&xas, XA_ZERO_ENTRY); | ||
1520 | xas_unlock_type(&xas, lock_type); | ||
1521 | } while (xas_nomem(&xas, gfp)); | ||
1522 | |||
1523 | return xas_error(&xas); | ||
1524 | } | ||
1525 | EXPORT_SYMBOL(xa_reserve); | ||
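/*
 * Editorial sketch of reserve-then-store ('array', 'index' and 'item'
 * are hypothetical):
 *
 *	if (xa_reserve(&array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	... xa_load(&array, index) still returns NULL here ...
 *	xa_store(&array, index, item, GFP_KERNEL);
 *
 * or call xa_release() to give the reserved slot back.
 */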
1526 | |||
1527 | #ifdef CONFIG_XARRAY_MULTI | ||
1528 | static void xas_set_range(struct xa_state *xas, unsigned long first, | ||
1529 | unsigned long last) | ||
1530 | { | ||
1531 | unsigned int shift = 0; | ||
1532 | unsigned long sibs = last - first; | ||
1533 | unsigned int offset = XA_CHUNK_MASK; | ||
1534 | |||
1535 | xas_set(xas, first); | ||
1536 | |||
1537 | while ((first & XA_CHUNK_MASK) == 0) { | ||
1538 | if (sibs < XA_CHUNK_MASK) | ||
1539 | break; | ||
1540 | if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) | ||
1541 | break; | ||
1542 | shift += XA_CHUNK_SHIFT; | ||
1543 | if (offset == XA_CHUNK_MASK) | ||
1544 | offset = sibs & XA_CHUNK_MASK; | ||
1545 | sibs >>= XA_CHUNK_SHIFT; | ||
1546 | first >>= XA_CHUNK_SHIFT; | ||
1547 | } | ||
1548 | |||
1549 | offset = first & XA_CHUNK_MASK; | ||
1550 | if (offset + sibs > XA_CHUNK_MASK) | ||
1551 | sibs = XA_CHUNK_MASK - offset; | ||
1552 | if ((((first + sibs + 1) << shift) - 1) > last) | ||
1553 | sibs -= 1; | ||
1554 | |||
1555 | xas->xa_shift = shift; | ||
1556 | xas->xa_sibs = sibs; | ||
1557 | } | ||
1558 | |||
1559 | /** | ||
1560 | * xa_store_range() - Store this entry at a range of indices in the XArray. | ||
1561 | * @xa: XArray. | ||
1562 | * @first: First index to affect. | ||
1563 | * @last: Last index to affect. | ||
1564 | * @entry: New entry. | ||
1565 | * @gfp: Memory allocation flags. | ||
1566 | * | ||
1567 | * After this function returns, loads from any index between @first and @last, | ||
1568 | * inclusive, will return @entry. | ||
1569 | * Storing into an existing multi-index entry updates the entry of every index. | ||
1570 | * The marks associated with these indices are unaffected unless @entry is %NULL. | ||
1571 | * | ||
1572 | * Context: Process context. Takes and releases the xa_lock. May sleep | ||
1573 | * if the @gfp flags permit. | ||
1574 | * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in | ||
1575 | * an XArray, or xa_err(-ENOMEM) if memory allocation failed. | ||
1576 | */ | ||
1577 | void *xa_store_range(struct xarray *xa, unsigned long first, | ||
1578 | unsigned long last, void *entry, gfp_t gfp) | ||
1579 | { | ||
1580 | XA_STATE(xas, xa, 0); | ||
1581 | |||
1582 | if (WARN_ON_ONCE(xa_is_internal(entry))) | ||
1583 | return XA_ERROR(-EINVAL); | ||
1584 | if (last < first) | ||
1585 | return XA_ERROR(-EINVAL); | ||
1586 | |||
1587 | do { | ||
1588 | xas_lock(&xas); | ||
1589 | if (entry) { | ||
1590 | unsigned int order = (last == ~0UL) ? | ||
1591 | 			BITS_PER_LONG : ilog2(last + 1); | ||
1592 | xas_set_order(&xas, last, order); | ||
1593 | xas_create(&xas); | ||
1594 | if (xas_error(&xas)) | ||
1595 | goto unlock; | ||
1596 | } | ||
1597 | do { | ||
1598 | xas_set_range(&xas, first, last); | ||
1599 | xas_store(&xas, entry); | ||
1600 | if (xas_error(&xas)) | ||
1601 | goto unlock; | ||
1602 | first += xas_size(&xas); | ||
1603 | } while (first <= last); | ||
1604 | unlock: | ||
1605 | xas_unlock(&xas); | ||
1606 | } while (xas_nomem(&xas, gfp)); | ||
1607 | |||
1608 | return xas_result(&xas, NULL); | ||
1609 | } | ||
1610 | EXPORT_SYMBOL(xa_store_range); | ||
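
/*
 * Illustrative sketch, not part of this file: covering a whole region
 * with a single value entry.  Hypothetical names; requires
 * CONFIG_XARRAY_MULTI.  Because storing into a multislot entry updates
 * every index it covers, erasing any one index in [first, last]
 * afterwards clears the whole range.
 */
static int example_tag_region(struct xarray *my_xa, unsigned long first,
		unsigned long last)
{
	void *ret = xa_store_range(my_xa, first, last, xa_mk_value(1),
			GFP_KERNEL);

	return xa_err(ret);
}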
1611 | #endif /* CONFIG_XARRAY_MULTI */ | ||
1612 | |||
1613 | /** | ||
1614 | * __xa_alloc() - Find somewhere to store this entry in the XArray. | ||
1615 | * @xa: XArray. | ||
1616 | * @id: Pointer to ID. | ||
1617 | * @max: Maximum ID to allocate (inclusive). | ||
1618 | * @entry: New entry. | ||
1619 | * @gfp: Memory allocation flags. | ||
1620 | * | ||
1621 | * Allocates an unused ID in the range between *@id and @max, inclusive. | ||
1622 | * Updates the @id pointer with the index, then stores the entry at that | ||
1623 | * index. A concurrent lookup will not see an uninitialised @id. | ||
1624 | * | ||
1625 | * Context: Any context. Expects xa_lock to be held on entry. May | ||
1626 | * release and reacquire xa_lock if @gfp flags permit. | ||
1627 | * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if | ||
1628 | * there is no more space in the XArray. | ||
1629 | */ | ||
1630 | int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) | ||
1631 | { | ||
1632 | XA_STATE(xas, xa, 0); | ||
1633 | int err; | ||
1634 | |||
1635 | if (WARN_ON_ONCE(xa_is_internal(entry))) | ||
1636 | return -EINVAL; | ||
1637 | if (WARN_ON_ONCE(!xa_track_free(xa))) | ||
1638 | return -EINVAL; | ||
1639 | |||
1640 | if (!entry) | ||
1641 | entry = XA_ZERO_ENTRY; | ||
1642 | |||
1643 | do { | ||
1644 | xas.xa_index = *id; | ||
1645 | xas_find_marked(&xas, max, XA_FREE_MARK); | ||
1646 | if (xas.xa_node == XAS_RESTART) | ||
1647 | xas_set_err(&xas, -ENOSPC); | ||
1648 | xas_store(&xas, entry); | ||
1649 | xas_clear_mark(&xas, XA_FREE_MARK); | ||
1650 | } while (__xas_nomem(&xas, gfp)); | ||
1651 | |||
1652 | err = xas_error(&xas); | ||
1653 | if (!err) | ||
1654 | *id = xas.xa_index; | ||
1655 | return err; | ||
1656 | } | ||
1657 | EXPORT_SYMBOL(__xa_alloc); | ||
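
/*
 * Illustrative sketch, not part of this file: allocating an ID while
 * the caller manages the lock itself.  Hypothetical names; assumes the
 * array was initialised with the XA_FLAGS_TRACK_FREE flag (e.g. via
 * xa_init_flags()).  Note that __xa_alloc() may drop and retake the
 * lock to allocate memory when the gfp flags allow sleeping.
 */
static int example_alloc_id(struct xarray *my_xa, void *my_object, u32 *id)
{
	int err;

	*id = 0;	/* start searching for a free index from 0 */
	xa_lock(my_xa);
	err = __xa_alloc(my_xa, id, UINT_MAX, my_object, GFP_KERNEL);
	xa_unlock(my_xa);

	return err;	/* on success, *id holds the allocated index */
}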
1658 | |||
1659 | /** | ||
1660 | * __xa_set_mark() - Set this mark on this entry while locked. | ||
1661 | * @xa: XArray. | ||
1662 | * @index: Index of entry. | ||
1663 | * @mark: Mark number. | ||
1664 | * | ||
1665 | * Attempting to set a mark on a NULL entry does not succeed. | ||
1666 | * | ||
1667 | * Context: Any context. Expects xa_lock to be held on entry. | ||
1668 | */ | ||
1669 | void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | ||
1670 | { | ||
1671 | XA_STATE(xas, xa, index); | ||
1672 | void *entry = xas_load(&xas); | ||
1673 | |||
1674 | if (entry) | ||
1675 | xas_set_mark(&xas, mark); | ||
1676 | } | ||
1677 | EXPORT_SYMBOL_GPL(__xa_set_mark); | ||
1678 | |||
1679 | /** | ||
1680 | * __xa_clear_mark() - Clear this mark on this entry while locked. | ||
1681 | * @xa: XArray. | ||
1682 | * @index: Index of entry. | ||
1683 | * @mark: Mark number. | ||
1684 | * | ||
1685 | * Context: Any context. Expects xa_lock to be held on entry. | ||
1686 | */ | ||
1687 | void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | ||
1688 | { | ||
1689 | XA_STATE(xas, xa, index); | ||
1690 | void *entry = xas_load(&xas); | ||
1691 | |||
1692 | if (entry) | ||
1693 | xas_clear_mark(&xas, mark); | ||
1694 | } | ||
1695 | EXPORT_SYMBOL_GPL(__xa_clear_mark); | ||
1696 | |||
1697 | /** | ||
1698 | * xa_get_mark() - Inquire whether this mark is set on this entry. | ||
1699 | * @xa: XArray. | ||
1700 | * @index: Index of entry. | ||
1701 | * @mark: Mark number. | ||
1702 | * | ||
1703 | * This function uses the RCU read lock, so the result may be out of date | ||
1704 | * by the time it returns. If you need the result to be stable, use a lock. | ||
1705 | * | ||
1706 | * Context: Any context. Takes and releases the RCU lock. | ||
1707 | * Return: True if the entry at @index has this mark set, false if it doesn't. | ||
1708 | */ | ||
1709 | bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | ||
1710 | { | ||
1711 | XA_STATE(xas, xa, index); | ||
1712 | void *entry; | ||
1713 | |||
1714 | rcu_read_lock(); | ||
1715 | entry = xas_start(&xas); | ||
1716 | while (xas_get_mark(&xas, mark)) { | ||
1717 | if (!xa_is_node(entry)) | ||
1718 | goto found; | ||
1719 | entry = xas_descend(&xas, xa_to_node(entry)); | ||
1720 | } | ||
1721 | rcu_read_unlock(); | ||
1722 | return false; | ||
1723 | found: | ||
1724 | rcu_read_unlock(); | ||
1725 | return true; | ||
1726 | } | ||
1727 | EXPORT_SYMBOL(xa_get_mark); | ||
1728 | |||
1729 | /** | ||
1730 | * xa_set_mark() - Set this mark on this entry. | ||
1731 | * @xa: XArray. | ||
1732 | * @index: Index of entry. | ||
1733 | * @mark: Mark number. | ||
1734 | * | ||
1735 | * Attempting to set a mark on a NULL entry does not succeed. | ||
1736 | * | ||
1737 | * Context: Process context. Takes and releases the xa_lock. | ||
1738 | */ | ||
1739 | void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | ||
1740 | { | ||
1741 | xa_lock(xa); | ||
1742 | __xa_set_mark(xa, index, mark); | ||
1743 | xa_unlock(xa); | ||
1744 | } | ||
1745 | EXPORT_SYMBOL(xa_set_mark); | ||
1746 | |||
1747 | /** | ||
1748 | * xa_clear_mark() - Clear this mark on this entry. | ||
1749 | * @xa: XArray. | ||
1750 | * @index: Index of entry. | ||
1751 | * @mark: Mark number. | ||
1752 | * | ||
1753 | * Clearing a mark always succeeds. | ||
1754 | * | ||
1755 | * Context: Process context. Takes and releases the xa_lock. | ||
1756 | */ | ||
1757 | void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | ||
1758 | { | ||
1759 | xa_lock(xa); | ||
1760 | __xa_clear_mark(xa, index, mark); | ||
1761 | xa_unlock(xa); | ||
1762 | } | ||
1763 | EXPORT_SYMBOL(xa_clear_mark); | ||
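
/*
 * Illustrative sketch, not part of this file: using XA_MARK_0 as a
 * cheap "dirty" flag on stored entries.  Hypothetical names.  Since
 * xa_get_mark() is only RCU-protected, the answer may be stale by the
 * time it is acted on; hold the xa_lock if that matters.
 */
static void example_mark_dirty(struct xarray *my_xa, unsigned long index)
{
	xa_set_mark(my_xa, index, XA_MARK_0);	/* no-op on a NULL entry */
}

static bool example_test_and_clean(struct xarray *my_xa, unsigned long index)
{
	if (!xa_get_mark(my_xa, index, XA_MARK_0))
		return false;
	xa_clear_mark(my_xa, index, XA_MARK_0);
	return true;
}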
1764 | |||
1765 | /** | ||
1766 | * xa_find() - Search the XArray for an entry. | ||
1767 | * @xa: XArray. | ||
1768 | * @indexp: Pointer to an index. | ||
1769 | * @max: Maximum index to search to. | ||
1770 | * @filter: Selection criterion. | ||
1771 | * | ||
1772 | * Finds the entry in @xa which matches the @filter, and has the lowest | ||
1773 | * index that is at least @indexp and no more than @max. | ||
1774 | * If an entry is found, @indexp is updated to be the index of the entry. | ||
1775 | * This function is protected by the RCU read lock, so it may miss | ||
1776 | * entries which are being simultaneously added. It will not return an | ||
1777 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). | ||
1778 | * | ||
1779 | * Context: Any context. Takes and releases the RCU lock. | ||
1780 | * Return: The entry, if found, otherwise %NULL. | ||
1781 | */ | ||
1782 | void *xa_find(struct xarray *xa, unsigned long *indexp, | ||
1783 | unsigned long max, xa_mark_t filter) | ||
1784 | { | ||
1785 | XA_STATE(xas, xa, *indexp); | ||
1786 | void *entry; | ||
1787 | |||
1788 | rcu_read_lock(); | ||
1789 | do { | ||
1790 | if ((__force unsigned int)filter < XA_MAX_MARKS) | ||
1791 | entry = xas_find_marked(&xas, max, filter); | ||
1792 | else | ||
1793 | entry = xas_find(&xas, max); | ||
1794 | } while (xas_retry(&xas, entry)); | ||
1795 | rcu_read_unlock(); | ||
1796 | |||
1797 | if (entry) | ||
1798 | *indexp = xas.xa_index; | ||
1799 | return entry; | ||
1800 | } | ||
1801 | EXPORT_SYMBOL(xa_find); | ||
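
/*
 * Editorial usage note: xa_find() is inclusive of *@indexp while
 * xa_find_after() below is exclusive.  With an entry present at
 * index 5 and index = 5, xa_find(xa, &index, ULONG_MAX, XA_PRESENT)
 * returns that entry and leaves index at 5, whereas xa_find_after()
 * skips it and returns the next present entry above 5, if any.
 */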
1802 | |||
1803 | /** | ||
1804 | * xa_find_after() - Search the XArray for a present entry. | ||
1805 | * @xa: XArray. | ||
1806 | * @indexp: Pointer to an index. | ||
1807 | * @max: Maximum index to search to. | ||
1808 | * @filter: Selection criterion. | ||
1809 | * | ||
1810 | * Finds the entry in @xa which matches the @filter and has the lowest | ||
1811 | * index that is above @indexp and no more than @max. | ||
1812 | * If an entry is found, @indexp is updated to be the index of the entry. | ||
1813 | * This function is protected by the RCU read lock, so it may miss entries | ||
1814 | * which are being simultaneously added. It will not return an | ||
1815 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). | ||
1816 | * | ||
1817 | * Context: Any context. Takes and releases the RCU lock. | ||
1818 | * Return: The entry, if found, otherwise %NULL. | ||
1819 | */ | ||
1820 | void *xa_find_after(struct xarray *xa, unsigned long *indexp, | ||
1821 | unsigned long max, xa_mark_t filter) | ||
1822 | { | ||
1823 | XA_STATE(xas, xa, *indexp + 1); | ||
1824 | void *entry; | ||
1825 | |||
1826 | rcu_read_lock(); | ||
1827 | for (;;) { | ||
1828 | if ((__force unsigned int)filter < XA_MAX_MARKS) | ||
1829 | entry = xas_find_marked(&xas, max, filter); | ||
1830 | else | ||
1831 | entry = xas_find(&xas, max); | ||
1832 | if (xas.xa_shift) { | ||
1833 | if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) | ||
1834 | continue; | ||
1835 | } else { | ||
1836 | if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) | ||
1837 | continue; | ||
1838 | } | ||
1839 | if (!xas_retry(&xas, entry)) | ||
1840 | break; | ||
1841 | } | ||
1842 | rcu_read_unlock(); | ||
1843 | |||
1844 | if (entry) | ||
1845 | *indexp = xas.xa_index; | ||
1846 | return entry; | ||
1847 | } | ||
1848 | EXPORT_SYMBOL(xa_find_after); | ||
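
/*
 * Illustrative sketch, not part of this file: visiting every present
 * entry, the pattern the xa_for_each() iterator in <linux/xarray.h>
 * is built from.  Hypothetical names.  Entries added concurrently may
 * or may not be seen, per the documentation above.
 */
static void example_walk(struct xarray *my_xa)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(my_xa, &index, ULONG_MAX, XA_PRESENT);
	     entry;
	     entry = xa_find_after(my_xa, &index, ULONG_MAX, XA_PRESENT))
		pr_info("index %lu: entry %px\n", index, entry);
}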
1849 | |||
1850 | static unsigned int xas_extract_present(struct xa_state *xas, void **dst, | ||
1851 | unsigned long max, unsigned int n) | ||
1852 | { | ||
1853 | void *entry; | ||
1854 | unsigned int i = 0; | ||
1855 | |||
1856 | rcu_read_lock(); | ||
1857 | xas_for_each(xas, entry, max) { | ||
1858 | if (xas_retry(xas, entry)) | ||
1859 | continue; | ||
1860 | dst[i++] = entry; | ||
1861 | if (i == n) | ||
1862 | break; | ||
1863 | } | ||
1864 | rcu_read_unlock(); | ||
1865 | |||
1866 | return i; | ||
1867 | } | ||
1868 | |||
1869 | static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, | ||
1870 | unsigned long max, unsigned int n, xa_mark_t mark) | ||
1871 | { | ||
1872 | void *entry; | ||
1873 | unsigned int i = 0; | ||
1874 | |||
1875 | rcu_read_lock(); | ||
1876 | xas_for_each_marked(xas, entry, max, mark) { | ||
1877 | if (xas_retry(xas, entry)) | ||
1878 | continue; | ||
1879 | dst[i++] = entry; | ||
1880 | if (i == n) | ||
1881 | break; | ||
1882 | } | ||
1883 | rcu_read_unlock(); | ||
1884 | |||
1885 | return i; | ||
1886 | } | ||
1887 | |||
1888 | /** | ||
1889 | * xa_extract() - Copy selected entries from the XArray into a normal array. | ||
1890 | * @xa: The source XArray to copy from. | ||
1891 | * @dst: The buffer to copy entries into. | ||
1892 | * @start: The first index in the XArray eligible to be selected. | ||
1893 | * @max: The last index in the XArray eligible to be selected. | ||
1894 | * @n: The maximum number of entries to copy. | ||
1895 | * @filter: Selection criterion. | ||
1896 | * | ||
1897 | * Copies up to @n entries that match @filter from the XArray. The | ||
1898 | * copied entries will have indices between @start and @max, inclusive. | ||
1899 | * | ||
1900 | * The @filter may be an XArray mark value, in which case entries which are | ||
1901 | * marked with that mark will be copied. It may also be %XA_PRESENT, in | ||
1902 | * which case all entries which are not NULL will be copied. | ||
1903 | * | ||
1904 | * The entries returned may not represent a snapshot of the XArray at a | ||
1905 | * moment in time. For example, if another thread stores to index 5, then | ||
1906 | * index 10, calling xa_extract() may return the old contents of index 5 | ||
1907 | * and the new contents of index 10. Indices not modified while this | ||
1908 | * function is running will not be skipped. | ||
1909 | * | ||
1910 | * If you need stronger guarantees, holding the xa_lock across calls to this | ||
1911 | * function will prevent concurrent modification. | ||
1912 | * | ||
1913 | * Context: Any context. Takes and releases the RCU lock. | ||
1914 | * Return: The number of entries copied. | ||
1915 | */ | ||
1916 | unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, | ||
1917 | unsigned long max, unsigned int n, xa_mark_t filter) | ||
1918 | { | ||
1919 | XA_STATE(xas, xa, start); | ||
1920 | |||
1921 | if (!n) | ||
1922 | return 0; | ||
1923 | |||
1924 | if ((__force unsigned int)filter < XA_MAX_MARKS) | ||
1925 | return xas_extract_marked(&xas, dst, max, n, filter); | ||
1926 | return xas_extract_present(&xas, dst, max, n); | ||
1927 | } | ||
1928 | EXPORT_SYMBOL(xa_extract); | ||
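
/*
 * Illustrative sketch, not part of this file: snapshotting up to 16
 * marked entries into a plain array so they can be processed without
 * holding any lock.  Hypothetical names; @batch must have room for
 * 16 pointers.
 */
static unsigned int example_grab_dirty(struct xarray *my_xa, void **batch)
{
	return xa_extract(my_xa, batch, 0, ULONG_MAX, 16, XA_MARK_0);
}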
1929 | |||
1930 | /** | ||
1931 | * xa_destroy() - Free all internal data structures. | ||
1932 | * @xa: XArray. | ||
1933 | * | ||
1934 | * After calling this function, the XArray is empty and has freed all memory | ||
1935 | * allocated for its internal data structures. You are responsible for | ||
1936 | * freeing the objects referenced by the XArray. | ||
1937 | * | ||
1938 | * Context: Any context. Takes and releases the xa_lock, interrupt-safe. | ||
1939 | */ | ||
1940 | void xa_destroy(struct xarray *xa) | ||
1941 | { | ||
1942 | XA_STATE(xas, xa, 0); | ||
1943 | unsigned long flags; | ||
1944 | void *entry; | ||
1945 | |||
1946 | xas.xa_node = NULL; | ||
1947 | xas_lock_irqsave(&xas, flags); | ||
1948 | entry = xa_head_locked(xa); | ||
1949 | RCU_INIT_POINTER(xa->xa_head, NULL); | ||
1950 | xas_init_marks(&xas); | ||
1951 | /* lockdep checks we're still holding the lock in xas_free_nodes() */ | ||
1952 | if (xa_is_node(entry)) | ||
1953 | xas_free_nodes(&xas, xa_to_node(entry)); | ||
1954 | xas_unlock_irqrestore(&xas, flags); | ||
1955 | } | ||
1956 | EXPORT_SYMBOL(xa_destroy); | ||
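
/*
 * Illustrative sketch, not part of this file: tearing down an XArray
 * that owns kmalloc'ed objects.  Hypothetical names; assumes every
 * entry is a pointer from kmalloc() (not a value entry).  xa_destroy()
 * frees only the tree's own nodes, so the entries must go first.
 */
static void example_teardown(struct xarray *my_xa)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(my_xa, &index, ULONG_MAX, XA_PRESENT);
	     entry;
	     entry = xa_find_after(my_xa, &index, ULONG_MAX, XA_PRESENT))
		kfree(entry);
	xa_destroy(my_xa);
}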
1957 | |||
1958 | #ifdef XA_DEBUG | ||
1959 | void xa_dump_node(const struct xa_node *node) | ||
1960 | { | ||
1961 | unsigned i, j; | ||
1962 | |||
1963 | if (!node) | ||
1964 | return; | ||
1965 | if ((unsigned long)node & 3) { | ||
1966 | pr_cont("node %px\n", node); | ||
1967 | return; | ||
1968 | } | ||
1969 | |||
1970 | pr_cont("node %px %s %d parent %px shift %d count %d values %d " | ||
1971 | "array %px list %px %px marks", | ||
1972 | node, node->parent ? "offset" : "max", node->offset, | ||
1973 | node->parent, node->shift, node->count, node->nr_values, | ||
1974 | node->array, node->private_list.prev, node->private_list.next); | ||
1975 | for (i = 0; i < XA_MAX_MARKS; i++) | ||
1976 | for (j = 0; j < XA_MARK_LONGS; j++) | ||
1977 | pr_cont(" %lx", node->marks[i][j]); | ||
1978 | pr_cont("\n"); | ||
1979 | } | ||
1980 | |||
1981 | void xa_dump_index(unsigned long index, unsigned int shift) | ||
1982 | { | ||
1983 | if (!shift) | ||
1984 | pr_info("%lu: ", index); | ||
1985 | else if (shift >= BITS_PER_LONG) | ||
1986 | pr_info("0-%lu: ", ~0UL); | ||
1987 | else | ||
1988 | pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); | ||
1989 | } | ||
1990 | |||
1991 | void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) | ||
1992 | { | ||
1993 | if (!entry) | ||
1994 | return; | ||
1995 | |||
1996 | xa_dump_index(index, shift); | ||
1997 | |||
1998 | if (xa_is_node(entry)) { | ||
1999 | if (shift == 0) { | ||
2000 | pr_cont("%px\n", entry); | ||
2001 | } else { | ||
2002 | unsigned long i; | ||
2003 | struct xa_node *node = xa_to_node(entry); | ||
2004 | xa_dump_node(node); | ||
2005 | for (i = 0; i < XA_CHUNK_SIZE; i++) | ||
2006 | xa_dump_entry(node->slots[i], | ||
2007 | index + (i << node->shift), node->shift); | ||
2008 | } | ||
2009 | } else if (xa_is_value(entry)) | ||
2010 | pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), | ||
2011 | xa_to_value(entry), entry); | ||
2012 | else if (!xa_is_internal(entry)) | ||
2013 | pr_cont("%px\n", entry); | ||
2014 | else if (xa_is_retry(entry)) | ||
2015 | pr_cont("retry (%ld)\n", xa_to_internal(entry)); | ||
2016 | else if (xa_is_sibling(entry)) | ||
2017 | pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); | ||
2018 | else if (xa_is_zero(entry)) | ||
2019 | pr_cont("zero (%ld)\n", xa_to_internal(entry)); | ||
2020 | else | ||
2021 | pr_cont("UNKNOWN ENTRY (%px)\n", entry); | ||
2022 | } | ||
2023 | |||
2024 | void xa_dump(const struct xarray *xa) | ||
2025 | { | ||
2026 | void *entry = xa->xa_head; | ||
2027 | unsigned int shift = 0; | ||
2028 | |||
2029 | pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, | ||
2030 | xa->xa_flags, xa_marked(xa, XA_MARK_0), | ||
2031 | xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); | ||
2032 | if (xa_is_node(entry)) | ||
2033 | shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; | ||
2034 | xa_dump_entry(entry, 0, shift); | ||
2035 | } | ||
2036 | #endif | ||