aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/xarray.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/xarray.h')
-rw-r--r--  include/linux/xarray.h | 102
1 file changed, 102 insertions, 0 deletions
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 883bb958e462..5ed6b462e754 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -242,6 +242,7 @@ enum xa_lock_type {
 #define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
 #define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
+#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
 #define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
 					(__force unsigned)(mark)))
 
@@ -499,6 +500,8 @@ void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
 		struct xa_limit, gfp_t);
+int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
+		struct xa_limit, u32 *next, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -859,6 +862,105 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
859} 862}
860 863
861/** 864/**
865 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
866 * @xa: XArray.
867 * @id: Pointer to ID.
868 * @entry: New entry.
869 * @limit: Range of allocated ID.
870 * @next: Pointer to next ID to allocate.
871 * @gfp: Memory allocation flags.
872 *
873 * Finds an empty entry in @xa between @limit.min and @limit.max,
874 * stores the index into the @id pointer, then stores the entry at
875 * that index. A concurrent lookup will not see an uninitialised @id.
876 * The search for an empty entry will start at @next and will wrap
877 * around if necessary.
878 *
879 * Context: Any context. Takes and releases the xa_lock. May sleep if
880 * the @gfp flags permit.
881 * Return: 0 if the allocation succeeded without wrapping. 1 if the
882 * allocation succeeded after wrapping, -ENOMEM if memory could not be
883 * allocated or -EBUSY if there are no free entries in @limit.
884 */
885static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
886 struct xa_limit limit, u32 *next, gfp_t gfp)
887{
888 int err;
889
890 xa_lock(xa);
891 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
892 xa_unlock(xa);
893
894 return err;
895}
896
897/**
898 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
899 * @xa: XArray.
900 * @id: Pointer to ID.
901 * @entry: New entry.
902 * @limit: Range of allocated ID.
903 * @next: Pointer to next ID to allocate.
904 * @gfp: Memory allocation flags.
905 *
906 * Finds an empty entry in @xa between @limit.min and @limit.max,
907 * stores the index into the @id pointer, then stores the entry at
908 * that index. A concurrent lookup will not see an uninitialised @id.
909 * The search for an empty entry will start at @next and will wrap
910 * around if necessary.
911 *
912 * Context: Any context. Takes and releases the xa_lock while
913 * disabling softirqs. May sleep if the @gfp flags permit.
914 * Return: 0 if the allocation succeeded without wrapping. 1 if the
915 * allocation succeeded after wrapping, -ENOMEM if memory could not be
916 * allocated or -EBUSY if there are no free entries in @limit.
917 */
918static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
919 struct xa_limit limit, u32 *next, gfp_t gfp)
920{
921 int err;
922
923 xa_lock_bh(xa);
924 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
925 xa_unlock_bh(xa);
926
927 return err;
928}
929
930/**
931 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
932 * @xa: XArray.
933 * @id: Pointer to ID.
934 * @entry: New entry.
935 * @limit: Range of allocated ID.
936 * @next: Pointer to next ID to allocate.
937 * @gfp: Memory allocation flags.
938 *
939 * Finds an empty entry in @xa between @limit.min and @limit.max,
940 * stores the index into the @id pointer, then stores the entry at
941 * that index. A concurrent lookup will not see an uninitialised @id.
942 * The search for an empty entry will start at @next and will wrap
943 * around if necessary.
944 *
945 * Context: Process context. Takes and releases the xa_lock while
946 * disabling interrupts. May sleep if the @gfp flags permit.
947 * Return: 0 if the allocation succeeded without wrapping. 1 if the
948 * allocation succeeded after wrapping, -ENOMEM if memory could not be
949 * allocated or -EBUSY if there are no free entries in @limit.
950 */
951static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
952 struct xa_limit limit, u32 *next, gfp_t gfp)
953{
954 int err;
955
956 xa_lock_irq(xa);
957 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
958 xa_unlock_irq(xa);
959
960 return err;
961}
962
963/**
862 * xa_reserve() - Reserve this index in the XArray. 964 * xa_reserve() - Reserve this index in the XArray.
863 * @xa: XArray. 965 * @xa: XArray.
864 * @index: Index into array. 966 * @index: Index into array.