author		David S. Miller <davem@davemloft.net>	2019-01-27 13:43:17 -0500
committer	David S. Miller <davem@davemloft.net>	2019-01-27 13:43:17 -0500
commit		1d68101367a92336e633d0c3681bf8c86644e124 (patch)
tree		8fdacef561481c25c71641b8fa91a5dfa0ba7f96 /include/linux/xarray.h
parent		085c4c7dd2b6558fb079fad07d6e9064f5e0b4c2 (diff)
parent		1fc7f56db7a7c467e46a5d2e2a009d2f337e0338 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'include/linux/xarray.h')
 -rw-r--r--	include/linux/xarray.h	227
 1 file changed, 170 insertions, 57 deletions
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f492e21c4aa2..5d9d318bcf7a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
  */
 static inline bool xa_is_err(const void *entry)
 {
-	return unlikely(xa_is_internal(entry));
+	return unlikely(xa_is_internal(entry) &&
+			entry >= xa_mk_internal(-MAX_ERRNO));
 }
 
 /**
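
Usage sketch (not part of the patch): with the tightened xa_is_err(), only errno-encoded internal entries count as errors, so the documented caller-side check on a store result is unchanged. The array and function names below are illustrative.

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);	/* illustrative array */

/* Store a small integer as an XArray value entry and report any error. */
static int example_store(unsigned long index, unsigned long val)
{
	void *old = xa_store(&example_xa, index, xa_mk_value(val), GFP_KERNEL);

	if (xa_is_err(old))		/* errno-encoded internal entry */
		return xa_err(old);	/* e.g. -ENOMEM */
	return 0;			/* previous entry (if any) replaced */
}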
@@ -286,7 +287,6 @@ struct xarray {
  */
 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
 
-void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *xa_erase(struct xarray *, unsigned long index);
@@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
 void xa_destroy(struct xarray *);
 
 /**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+	spin_lock_init(&xa->xa_lock);
+	xa->xa_flags = flags;
+	xa->xa_head = NULL;
+}
+
+/**
  * xa_init() - Initialise an empty XArray.
  * @xa: XArray.
  *
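
Usage sketch (not part of the patch): for an XArray embedded in a dynamically allocated structure, the now-inline xa_init_flags() is the runtime counterpart of DEFINE_XARRAY_FLAGS(). The structure and function names below are illustrative.

#include <linux/slab.h>
#include <linux/xarray.h>

struct example_ctx {
	struct xarray ids;	/* allocating XArray, protected by ids.xa_lock */
};

static struct example_ctx *example_ctx_new(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	xa_init_flags(&ctx->ids, XA_FLAGS_ALLOC);	/* ready for xa_alloc() */
	return ctx;
}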
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 }
 
 /**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
+ * @index: Index of @entry.
  * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start) \
+	for (index = start, \
+	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
+	     entry; \
+	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
  * @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
  *
- * Initialise @index to the lowest index you want to retrieve from the
- * array. During the iteration, @entry will have the value of the entry
- * stored in @xa at @index. The iteration will skip all entries in the
- * array which do not match @filter. You may modify @index during the
- * iteration if you want to skip or reprocess indices. It is safe to modify
- * the array during the iteration. At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you want
+ * to skip or reprocess indices. It is safe to modify the array during the
+ * iteration. At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
  *
  * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
  * to handle your own locking with xas_for_each(), and if you have to unlock
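
Usage sketch (not part of the patch): resuming an iteration at an arbitrary index with the new xa_for_each_start(); the helper name is illustrative.

#include <linux/printk.h>
#include <linux/xarray.h>

/* Print every present entry at or above @start; the RCU lock is taken per lookup. */
static void example_dump_from(struct xarray *xa, unsigned long start)
{
	unsigned long index;
	void *entry;

	xa_for_each_start(xa, index, entry, start)
		pr_info("index %lu -> %px\n", index, entry);
}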
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context. Takes and releases the RCU lock.
  */
-#define xa_for_each(xa, entry, index, max, filter) \
-	for (entry = xa_find(xa, &index, max, filter); entry; \
-	     entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+	xa_for_each_start(xa, index, entry, 0)
+
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. The iteration will skip all entries in the array
+ * which do not match @filter. You may modify @index during the iteration
+ * if you want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead. The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
 
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
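
Usage sketch (not part of the patch): the argument order changes from xa_for_each(xa, entry, index, max, filter) to xa_for_each(xa, index, entry), and marked iteration moves to xa_for_each_marked(), so existing callers need a mechanical update. Names below are illustrative.

#include <linux/printk.h>
#include <linux/xarray.h>

static void example_walk(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	/* All present entries; replaces the old (xa, entry, index, ULONG_MAX, XA_PRESENT) form. */
	xa_for_each(xa, index, entry)
		pr_info("present at %lu\n", index);

	/* Only entries with XA_MARK_0 set; replaces the old filter argument. */
	xa_for_each_marked(xa, index, entry, XA_MARK_0)
		pr_info("marked at %lu\n", index);
}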
@@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
 /**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
-{
-	void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
-}
-
-/**
  * xa_store_bh() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
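
Usage sketch (not part of the patch): __xa_insert() keeps its calling convention (xa_lock held by the caller), only its body moves out of line, so it can still be combined with other locked operations. The helper below is illustrative and assumes GFP_ATOMIC so the lock is never dropped.

#include <linux/xarray.h>

/* Insert two related entries so other xa_lock users never see only one. */
static int example_insert_pair(struct xarray *xa, unsigned long index,
			       void *a, void *b)
{
	int err;

	xa_lock(xa);
	err = __xa_insert(xa, index, a, GFP_ATOMIC);
	if (!err) {
		err = __xa_insert(xa, index + 1, b, GFP_ATOMIC);
		if (err)
			__xa_erase(xa, index);	/* roll back the first insert */
	}
	xa_unlock(xa);

	return err;	/* 0, -EEXIST or -ENOMEM */
}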
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 }
 
 /**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
  * @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
  *
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
  * Return: 0 if the store succeeded. -EEXIST if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
 static inline int xa_insert(struct xarray *xa, unsigned long index,
 		void *entry, gfp_t gfp)
 {
-	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
+	int err;
+
+	xa_lock(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
 }
 
 /**
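
Usage sketch (not part of the patch): the xa_insert() contract documented above is 0 on success and -EEXIST if the index is already in use, including by a reserved entry; a NULL @entry reserves the index. Names below are illustrative.

#include <linux/printk.h>
#include <linux/xarray.h>

/* Claim @index for @entry exactly once; losers of the race get -EEXIST. */
static int example_claim(struct xarray *xa, unsigned long index, void *entry)
{
	int err = xa_insert(xa, index, entry, GFP_KERNEL);

	if (err == -EEXIST)
		pr_debug("index %lu already claimed\n", index);
	return err;		/* 0, -EEXIST or -ENOMEM */
}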
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
 		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 }
 
-#define XA_ZERO_ENTRY		xa_mk_internal(256)
-#define XA_RETRY_ENTRY		xa_mk_internal(257)
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
+#define XA_ZERO_ENTRY		xa_mk_internal(257)
 
 /**
  * xa_is_zero() - Is the entry a zero entry?
@@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry)
 }
 
 /**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
+/**
  * typedef xa_update_node_t - A callback function from the XArray.
  * @node: The node which is being processed
  *