diff options
| -rw-r--r-- | Documentation/core-api/idr.rst | 79 | ||||
| -rw-r--r-- | Documentation/core-api/index.rst | 1 | ||||
| -rw-r--r-- | Documentation/core-api/kernel-api.rst | 12 | ||||
| -rw-r--r-- | include/linux/idr.h | 174 | ||||
| -rw-r--r-- | include/linux/radix-tree.h | 17 | ||||
| -rw-r--r-- | lib/idr.c | 255 | ||||
| -rw-r--r-- | lib/radix-tree.c | 3 | ||||
| -rw-r--r-- | net/sched/act_api.c | 72 | ||||
| -rw-r--r-- | net/sched/cls_api.c | 8 | ||||
| -rw-r--r-- | net/sched/cls_basic.c | 33 | ||||
| -rw-r--r-- | net/sched/cls_bpf.c | 30 | ||||
| -rw-r--r-- | net/sched/cls_flower.c | 34 | ||||
| -rw-r--r-- | net/sched/cls_u32.c | 47 | ||||
| -rw-r--r-- | tools/testing/radix-tree/idr-test.c | 29 | ||||
| -rw-r--r-- | tools/testing/radix-tree/linux/kernel.h | 2 |
15 files changed, 471 insertions, 325 deletions
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst new file mode 100644 index 000000000000..9078a5c3ac95 --- /dev/null +++ b/Documentation/core-api/idr.rst | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | .. SPDX-License-Identifier: CC-BY-SA-4.0 | ||
| 2 | |||
| 3 | ============= | ||
| 4 | ID Allocation | ||
| 5 | ============= | ||
| 6 | |||
| 7 | :Author: Matthew Wilcox | ||
| 8 | |||
| 9 | Overview | ||
| 10 | ======== | ||
| 11 | |||
| 12 | A common problem to solve is allocating identifiers (IDs); generally | ||
| 13 | small numbers which identify a thing. Examples include file descriptors, | ||
| 14 | process IDs, packet identifiers in networking protocols, SCSI tags | ||
| 15 | and device instance numbers. The IDR and the IDA provide a reasonable | ||
| 16 | solution to the problem to avoid everybody inventing their own. The IDR | ||
| 17 | provides the ability to map an ID to a pointer, while the IDA provides | ||
| 18 | only ID allocation, and as a result is much more memory-efficient. | ||
| 19 | |||
| 20 | IDR usage | ||
| 21 | ========= | ||
| 22 | |||
| 23 | Start by initialising an IDR, either with :c:func:`DEFINE_IDR` | ||
| 24 | for statically allocated IDRs or :c:func:`idr_init` for dynamically | ||
| 25 | allocated IDRs. | ||
| 26 | |||
| 27 | You can call :c:func:`idr_alloc` to allocate an unused ID. Look up | ||
| 28 | the pointer you associated with the ID by calling :c:func:`idr_find` | ||
| 29 | and free the ID by calling :c:func:`idr_remove`. | ||
| 30 | |||
| 31 | If you need to change the pointer associated with an ID, you can call | ||
| 32 | :c:func:`idr_replace`. One common reason to do this is to reserve an | ||
| 33 | ID by passing a ``NULL`` pointer to the allocation function; initialise the | ||
| 34 | object with the reserved ID and finally insert the initialised object | ||
| 35 | into the IDR. | ||
| 36 | |||
| 37 | Some users need to allocate IDs larger than ``INT_MAX``. So far all of | ||
| 38 | these users have been content with a ``UINT_MAX`` limit, and they use | ||
| 39 | :c:func:`idr_alloc_u32`. If you need IDs that will not fit in a u32, | ||
| 40 | we will work with you to address your needs. | ||
| 41 | |||
| 42 | If you need to allocate IDs sequentially, you can use | ||
| 43 | :c:func:`idr_alloc_cyclic`. The IDR becomes less efficient when dealing | ||
| 44 | with larger IDs, so using this function comes at a slight cost. | ||
| 45 | |||
| 46 | To perform an action on all pointers used by the IDR, you can | ||
| 47 | either use the callback-based :c:func:`idr_for_each` or the | ||
| 48 | iterator-style :c:func:`idr_for_each_entry`. You may need to use | ||
| 49 | :c:func:`idr_for_each_entry_continue` to continue an iteration. You can | ||
| 50 | also use :c:func:`idr_get_next` if the iterator doesn't fit your needs. | ||
| 51 | |||
| 52 | When you have finished using an IDR, you can call :c:func:`idr_destroy` | ||
| 53 | to release the memory used by the IDR. This will not free the objects | ||
| 54 | pointed to from the IDR; if you want to do that, use one of the iterators | ||
| 55 | to do it. | ||
| 56 | |||
| 57 | You can use :c:func:`idr_is_empty` to find out whether there are any | ||
| 58 | IDs currently allocated. | ||
| 59 | |||
| 60 | If you need to take a lock while allocating a new ID from the IDR, | ||
| 61 | you may need to pass a restrictive set of GFP flags, which can lead | ||
| 62 | to the IDR being unable to allocate memory. To work around this, | ||
| 63 | you can call :c:func:`idr_preload` before taking the lock, and then | ||
| 64 | :c:func:`idr_preload_end` after the allocation. | ||
| 65 | |||
| 66 | .. kernel-doc:: include/linux/idr.h | ||
| 67 | :doc: idr sync | ||
| 68 | |||
| 69 | IDA usage | ||
| 70 | ========= | ||
| 71 | |||
| 72 | .. kernel-doc:: lib/idr.c | ||
| 73 | :doc: IDA description | ||
| 74 | |||
| 75 | Functions and structures | ||
| 76 | ======================== | ||
| 77 | |||
| 78 | .. kernel-doc:: include/linux/idr.h | ||
| 79 | .. kernel-doc:: lib/idr.c | ||
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index 1b1fd01990b5..c670a8031786 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst | |||
| @@ -16,6 +16,7 @@ Core utilities | |||
| 16 | atomic_ops | 16 | atomic_ops |
| 17 | refcount-vs-atomic | 17 | refcount-vs-atomic |
| 18 | cpu_hotplug | 18 | cpu_hotplug |
| 19 | idr | ||
| 19 | local_ops | 20 | local_ops |
| 20 | workqueue | 21 | workqueue |
| 21 | genericirq | 22 | genericirq |
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index e7fadf02c511..ff335f8aeb39 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst | |||
| @@ -103,18 +103,6 @@ CRC Functions | |||
| 103 | .. kernel-doc:: lib/crc-itu-t.c | 103 | .. kernel-doc:: lib/crc-itu-t.c |
| 104 | :export: | 104 | :export: |
| 105 | 105 | ||
| 106 | idr/ida Functions | ||
| 107 | ----------------- | ||
| 108 | |||
| 109 | .. kernel-doc:: include/linux/idr.h | ||
| 110 | :doc: idr sync | ||
| 111 | |||
| 112 | .. kernel-doc:: lib/idr.c | ||
| 113 | :doc: IDA description | ||
| 114 | |||
| 115 | .. kernel-doc:: lib/idr.c | ||
| 116 | :export: | ||
| 117 | |||
| 118 | Math Functions in Linux | 106 | Math Functions in Linux |
| 119 | ======================= | 107 | ======================= |
| 120 | 108 | ||
diff --git a/include/linux/idr.h b/include/linux/idr.h index fa14f834e4ed..7d6a6313f0ab 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
| @@ -15,10 +15,10 @@ | |||
| 15 | #include <linux/radix-tree.h> | 15 | #include <linux/radix-tree.h> |
| 16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
| 17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
| 18 | #include <linux/bug.h> | ||
| 19 | 18 | ||
| 20 | struct idr { | 19 | struct idr { |
| 21 | struct radix_tree_root idr_rt; | 20 | struct radix_tree_root idr_rt; |
| 21 | unsigned int idr_base; | ||
| 22 | unsigned int idr_next; | 22 | unsigned int idr_next; |
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| @@ -31,10 +31,26 @@ struct idr { | |||
| 31 | /* Set the IDR flag and the IDR_FREE tag */ | 31 | /* Set the IDR flag and the IDR_FREE tag */ |
| 32 | #define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) | 32 | #define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) |
| 33 | 33 | ||
| 34 | #define IDR_INIT \ | 34 | #define IDR_INIT_BASE(base) { \ |
| 35 | { \ | 35 | .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \ |
| 36 | .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ | 36 | .idr_base = (base), \ |
| 37 | .idr_next = 0, \ | ||
| 37 | } | 38 | } |
| 39 | |||
| 40 | /** | ||
| 41 | * IDR_INIT() - Initialise an IDR. | ||
| 42 | * | ||
| 43 | * A freshly-initialised IDR contains no IDs. | ||
| 44 | */ | ||
| 45 | #define IDR_INIT IDR_INIT_BASE(0) | ||
| 46 | |||
| 47 | /** | ||
| 48 | * DEFINE_IDR() - Define a statically-allocated IDR | ||
| 49 | * @name: Name of IDR | ||
| 50 | * | ||
| 51 | * An IDR defined using this macro is ready for use with no additional | ||
| 52 | * initialisation required. It contains no IDs. | ||
| 53 | */ | ||
| 38 | #define DEFINE_IDR(name) struct idr name = IDR_INIT | 54 | #define DEFINE_IDR(name) struct idr name = IDR_INIT |
| 39 | 55 | ||
| 40 | /** | 56 | /** |
| @@ -82,80 +98,52 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) | |||
| 82 | 98 | ||
| 83 | void idr_preload(gfp_t gfp_mask); | 99 | void idr_preload(gfp_t gfp_mask); |
| 84 | 100 | ||
| 85 | int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index, | 101 | int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t); |
| 86 | unsigned long start, unsigned long end, gfp_t gfp, | 102 | int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id, |
| 87 | bool ext); | 103 | unsigned long max, gfp_t); |
| 88 | 104 | int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t); | |
| 89 | /** | 105 | void *idr_remove(struct idr *, unsigned long id); |
| 90 | * idr_alloc - allocate an id | 106 | void *idr_find(const struct idr *, unsigned long id); |
| 91 | * @idr: idr handle | ||
| 92 | * @ptr: pointer to be associated with the new id | ||
| 93 | * @start: the minimum id (inclusive) | ||
| 94 | * @end: the maximum id (exclusive) | ||
| 95 | * @gfp: memory allocation flags | ||
| 96 | * | ||
| 97 | * Allocates an unused ID in the range [start, end). Returns -ENOSPC | ||
| 98 | * if there are no unused IDs in that range. | ||
| 99 | * | ||
| 100 | * Note that @end is treated as max when <= 0. This is to always allow | ||
| 101 | * using @start + N as @end as long as N is inside integer range. | ||
| 102 | * | ||
| 103 | * Simultaneous modifications to the @idr are not allowed and should be | ||
| 104 | * prevented by the user, usually with a lock. idr_alloc() may be called | ||
| 105 | * concurrently with read-only accesses to the @idr, such as idr_find() and | ||
| 106 | * idr_for_each_entry(). | ||
| 107 | */ | ||
| 108 | static inline int idr_alloc(struct idr *idr, void *ptr, | ||
| 109 | int start, int end, gfp_t gfp) | ||
| 110 | { | ||
| 111 | unsigned long id; | ||
| 112 | int ret; | ||
| 113 | |||
| 114 | if (WARN_ON_ONCE(start < 0)) | ||
| 115 | return -EINVAL; | ||
| 116 | |||
| 117 | ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false); | ||
| 118 | |||
| 119 | if (ret) | ||
| 120 | return ret; | ||
| 121 | |||
| 122 | return id; | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline int idr_alloc_ext(struct idr *idr, void *ptr, | ||
| 126 | unsigned long *index, | ||
| 127 | unsigned long start, | ||
| 128 | unsigned long end, | ||
| 129 | gfp_t gfp) | ||
| 130 | { | ||
| 131 | return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true); | ||
| 132 | } | ||
| 133 | |||
| 134 | int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); | ||
| 135 | int idr_for_each(const struct idr *, | 107 | int idr_for_each(const struct idr *, |
| 136 | int (*fn)(int id, void *p, void *data), void *data); | 108 | int (*fn)(int id, void *p, void *data), void *data); |
| 137 | void *idr_get_next(struct idr *, int *nextid); | 109 | void *idr_get_next(struct idr *, int *nextid); |
| 138 | void *idr_get_next_ext(struct idr *idr, unsigned long *nextid); | 110 | void *idr_get_next_ul(struct idr *, unsigned long *nextid); |
| 139 | void *idr_replace(struct idr *, void *, int id); | 111 | void *idr_replace(struct idr *, void *, unsigned long id); |
| 140 | void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id); | ||
| 141 | void idr_destroy(struct idr *); | 112 | void idr_destroy(struct idr *); |
| 142 | 113 | ||
| 143 | static inline void *idr_remove_ext(struct idr *idr, unsigned long id) | 114 | /** |
| 144 | { | 115 | * idr_init_base() - Initialise an IDR. |
| 145 | return radix_tree_delete_item(&idr->idr_rt, id, NULL); | 116 | * @idr: IDR handle. |
| 146 | } | 117 | * @base: The base value for the IDR. |
| 147 | 118 | * | |
| 148 | static inline void *idr_remove(struct idr *idr, int id) | 119 | * This variation of idr_init() creates an IDR which will allocate IDs |
| 120 | * starting at @base. | ||
| 121 | */ | ||
| 122 | static inline void idr_init_base(struct idr *idr, int base) | ||
| 149 | { | 123 | { |
| 150 | return idr_remove_ext(idr, id); | 124 | INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); |
| 125 | idr->idr_base = base; | ||
| 126 | idr->idr_next = 0; | ||
| 151 | } | 127 | } |
| 152 | 128 | ||
| 129 | /** | ||
| 130 | * idr_init() - Initialise an IDR. | ||
| 131 | * @idr: IDR handle. | ||
| 132 | * | ||
| 133 | * Initialise a dynamically allocated IDR. To initialise a | ||
| 134 | * statically allocated IDR, use DEFINE_IDR(). | ||
| 135 | */ | ||
| 153 | static inline void idr_init(struct idr *idr) | 136 | static inline void idr_init(struct idr *idr) |
| 154 | { | 137 | { |
| 155 | INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); | 138 | idr_init_base(idr, 0); |
| 156 | idr->idr_next = 0; | ||
| 157 | } | 139 | } |
| 158 | 140 | ||
| 141 | /** | ||
| 142 | * idr_is_empty() - Are there any IDs allocated? | ||
| 143 | * @idr: IDR handle. | ||
| 144 | * | ||
| 145 | * Return: %true if any IDs have been allocated from this IDR. | ||
| 146 | */ | ||
| 159 | static inline bool idr_is_empty(const struct idr *idr) | 147 | static inline bool idr_is_empty(const struct idr *idr) |
| 160 | { | 148 | { |
| 161 | return radix_tree_empty(&idr->idr_rt) && | 149 | return radix_tree_empty(&idr->idr_rt) && |
| @@ -174,50 +162,38 @@ static inline void idr_preload_end(void) | |||
| 174 | } | 162 | } |
| 175 | 163 | ||
| 176 | /** | 164 | /** |
| 177 | * idr_find - return pointer for given id | 165 | * idr_for_each_entry() - Iterate over an IDR's elements of a given type. |
| 178 | * @idr: idr handle | 166 | * @idr: IDR handle. |
| 179 | * @id: lookup key | 167 | * @entry: The type * to use as cursor |
| 180 | * | 168 | * @id: Entry ID. |
| 181 | * Return the pointer given the id it has been registered with. A %NULL | ||
| 182 | * return indicates that @id is not valid or you passed %NULL in | ||
| 183 | * idr_get_new(). | ||
| 184 | * | 169 | * |
| 185 | * This function can be called under rcu_read_lock(), given that the leaf | 170 | * @entry and @id do not need to be initialized before the loop, and |
| 186 | * pointers lifetimes are correctly managed. | 171 | * after normal termination @entry is left with the value NULL. This |
| 172 | * is convenient for a "not found" value. | ||
| 187 | */ | 173 | */ |
| 188 | static inline void *idr_find_ext(const struct idr *idr, unsigned long id) | 174 | #define idr_for_each_entry(idr, entry, id) \ |
| 189 | { | 175 | for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) |
| 190 | return radix_tree_lookup(&idr->idr_rt, id); | ||
| 191 | } | ||
| 192 | |||
| 193 | static inline void *idr_find(const struct idr *idr, int id) | ||
| 194 | { | ||
| 195 | return idr_find_ext(idr, id); | ||
| 196 | } | ||
| 197 | 176 | ||
| 198 | /** | 177 | /** |
| 199 | * idr_for_each_entry - iterate over an idr's elements of a given type | 178 | * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. |
| 200 | * @idr: idr handle | 179 | * @idr: IDR handle. |
| 201 | * @entry: the type * to use as cursor | 180 | * @entry: The type * to use as cursor. |
| 202 | * @id: id entry's key | 181 | * @id: Entry ID. |
| 203 | * | 182 | * |
| 204 | * @entry and @id do not need to be initialized before the loop, and | 183 | * @entry and @id do not need to be initialized before the loop, and |
| 205 | * after normal terminatinon @entry is left with the value NULL. This | 184 | * after normal termination @entry is left with the value NULL. This |
| 206 | * is convenient for a "not found" value. | 185 | * is convenient for a "not found" value. |
| 207 | */ | 186 | */ |
| 208 | #define idr_for_each_entry(idr, entry, id) \ | 187 | #define idr_for_each_entry_ul(idr, entry, id) \ |
| 209 | for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) | 188 | for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id) |
| 210 | #define idr_for_each_entry_ext(idr, entry, id) \ | ||
| 211 | for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id) | ||
| 212 | 189 | ||
| 213 | /** | 190 | /** |
| 214 | * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type | 191 | * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type |
| 215 | * @idr: idr handle | 192 | * @idr: IDR handle. |
| 216 | * @entry: the type * to use as cursor | 193 | * @entry: The type * to use as a cursor. |
| 217 | * @id: id entry's key | 194 | * @id: Entry ID. |
| 218 | * | 195 | * |
| 219 | * Continue to iterate over list of given type, continuing after | 196 | * Continue to iterate over entries, continuing after the current position. |
| 220 | * the current position. | ||
| 221 | */ | 197 | */ |
| 222 | #define idr_for_each_entry_continue(idr, entry, id) \ | 198 | #define idr_for_each_entry_continue(idr, entry, id) \ |
| 223 | for ((entry) = idr_get_next((idr), &(id)); \ | 199 | for ((entry) = idr_get_next((idr), &(id)); \ |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 23a9c89c7ad9..fc55ff31eca7 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -356,24 +356,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, | |||
| 356 | int radix_tree_join(struct radix_tree_root *, unsigned long index, | 356 | int radix_tree_join(struct radix_tree_root *, unsigned long index, |
| 357 | unsigned new_order, void *); | 357 | unsigned new_order, void *); |
| 358 | 358 | ||
| 359 | void __rcu **idr_get_free_cmn(struct radix_tree_root *root, | 359 | void __rcu **idr_get_free(struct radix_tree_root *root, |
| 360 | struct radix_tree_iter *iter, gfp_t gfp, | 360 | struct radix_tree_iter *iter, gfp_t gfp, |
| 361 | unsigned long max); | 361 | unsigned long max); |
| 362 | static inline void __rcu **idr_get_free(struct radix_tree_root *root, | ||
| 363 | struct radix_tree_iter *iter, | ||
| 364 | gfp_t gfp, | ||
| 365 | int end) | ||
| 366 | { | ||
| 367 | return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX); | ||
| 368 | } | ||
| 369 | |||
| 370 | static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root, | ||
| 371 | struct radix_tree_iter *iter, | ||
| 372 | gfp_t gfp, | ||
| 373 | unsigned long end) | ||
| 374 | { | ||
| 375 | return idr_get_free_cmn(root, iter, gfp, end - 1); | ||
| 376 | } | ||
| 377 | 362 | ||
| 378 | enum { | 363 | enum { |
| 379 | RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ | 364 | RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ |
| @@ -1,4 +1,5 @@ | |||
| 1 | #include <linux/bitmap.h> | 1 | #include <linux/bitmap.h> |
| 2 | #include <linux/bug.h> | ||
| 2 | #include <linux/export.h> | 3 | #include <linux/export.h> |
| 3 | #include <linux/idr.h> | 4 | #include <linux/idr.h> |
| 4 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| @@ -7,71 +8,184 @@ | |||
| 7 | DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); | 8 | DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); |
| 8 | static DEFINE_SPINLOCK(simple_ida_lock); | 9 | static DEFINE_SPINLOCK(simple_ida_lock); |
| 9 | 10 | ||
| 10 | int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index, | 11 | /** |
| 11 | unsigned long start, unsigned long end, gfp_t gfp, | 12 | * idr_alloc_u32() - Allocate an ID. |
| 12 | bool ext) | 13 | * @idr: IDR handle. |
| 14 | * @ptr: Pointer to be associated with the new ID. | ||
| 15 | * @nextid: Pointer to an ID. | ||
| 16 | * @max: The maximum ID to allocate (inclusive). | ||
| 17 | * @gfp: Memory allocation flags. | ||
| 18 | * | ||
| 19 | * Allocates an unused ID in the range specified by @nextid and @max. | ||
| 20 | * Note that @max is inclusive whereas the @end parameter to idr_alloc() | ||
| 21 | * is exclusive. The new ID is assigned to @nextid before the pointer | ||
| 22 | * is inserted into the IDR, so if @nextid points into the object pointed | ||
| 23 | * to by @ptr, a concurrent lookup will not find an uninitialised ID. | ||
| 24 | * | ||
| 25 | * The caller should provide their own locking to ensure that two | ||
| 26 | * concurrent modifications to the IDR are not possible. Read-only | ||
| 27 | * accesses to the IDR may be done under the RCU read lock or may | ||
| 28 | * exclude simultaneous writers. | ||
| 29 | * | ||
| 30 | * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed, | ||
| 31 | * or -ENOSPC if no free IDs could be found. If an error occurred, | ||
| 32 | * @nextid is unchanged. | ||
| 33 | */ | ||
| 34 | int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid, | ||
| 35 | unsigned long max, gfp_t gfp) | ||
| 13 | { | 36 | { |
| 14 | struct radix_tree_iter iter; | 37 | struct radix_tree_iter iter; |
| 15 | void __rcu **slot; | 38 | void __rcu **slot; |
| 39 | int base = idr->idr_base; | ||
| 40 | int id = *nextid; | ||
| 16 | 41 | ||
| 17 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) | 42 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) |
| 18 | return -EINVAL; | 43 | return -EINVAL; |
| 44 | if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR))) | ||
| 45 | idr->idr_rt.gfp_mask |= IDR_RT_MARKER; | ||
| 19 | 46 | ||
| 20 | radix_tree_iter_init(&iter, start); | 47 | id = (id < base) ? 0 : id - base; |
| 21 | if (ext) | 48 | radix_tree_iter_init(&iter, id); |
| 22 | slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end); | 49 | slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base); |
| 23 | else | ||
| 24 | slot = idr_get_free(&idr->idr_rt, &iter, gfp, end); | ||
| 25 | if (IS_ERR(slot)) | 50 | if (IS_ERR(slot)) |
| 26 | return PTR_ERR(slot); | 51 | return PTR_ERR(slot); |
| 27 | 52 | ||
| 53 | *nextid = iter.index + base; | ||
| 54 | /* there is a memory barrier inside radix_tree_iter_replace() */ | ||
| 28 | radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); | 55 | radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); |
| 29 | radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); | 56 | radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); |
| 30 | 57 | ||
| 31 | if (index) | ||
| 32 | *index = iter.index; | ||
| 33 | return 0; | 58 | return 0; |
| 34 | } | 59 | } |
| 35 | EXPORT_SYMBOL_GPL(idr_alloc_cmn); | 60 | EXPORT_SYMBOL_GPL(idr_alloc_u32); |
| 36 | 61 | ||
| 37 | /** | 62 | /** |
| 38 | * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion | 63 | * idr_alloc() - Allocate an ID. |
| 39 | * @idr: idr handle | 64 | * @idr: IDR handle. |
| 40 | * @ptr: pointer to be associated with the new id | 65 | * @ptr: Pointer to be associated with the new ID. |
| 41 | * @start: the minimum id (inclusive) | 66 | * @start: The minimum ID (inclusive). |
| 42 | * @end: the maximum id (exclusive) | 67 | * @end: The maximum ID (exclusive). |
| 43 | * @gfp: memory allocation flags | 68 | * @gfp: Memory allocation flags. |
| 44 | * | 69 | * |
| 45 | * Allocates an ID larger than the last ID allocated if one is available. | 70 | * Allocates an unused ID in the range specified by @start and @end. If |
| 46 | * If not, it will attempt to allocate the smallest ID that is larger or | 71 | * @end is <= 0, it is treated as one larger than %INT_MAX. This allows |
| 47 | * equal to @start. | 72 | * callers to use @start + N as @end as long as N is within integer range. |
| 73 | * | ||
| 74 | * The caller should provide their own locking to ensure that two | ||
| 75 | * concurrent modifications to the IDR are not possible. Read-only | ||
| 76 | * accesses to the IDR may be done under the RCU read lock or may | ||
| 77 | * exclude simultaneous writers. | ||
| 78 | * | ||
| 79 | * Return: The newly allocated ID, -ENOMEM if memory allocation failed, | ||
| 80 | * or -ENOSPC if no free IDs could be found. | ||
| 48 | */ | 81 | */ |
| 49 | int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) | 82 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) |
| 50 | { | 83 | { |
| 51 | int id, curr = idr->idr_next; | 84 | u32 id = start; |
| 85 | int ret; | ||
| 86 | |||
| 87 | if (WARN_ON_ONCE(start < 0)) | ||
| 88 | return -EINVAL; | ||
| 89 | |||
| 90 | ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp); | ||
| 91 | if (ret) | ||
| 92 | return ret; | ||
| 93 | |||
| 94 | return id; | ||
| 95 | } | ||
| 96 | EXPORT_SYMBOL_GPL(idr_alloc); | ||
| 52 | 97 | ||
| 53 | if (curr < start) | 98 | /** |
| 54 | curr = start; | 99 | * idr_alloc_cyclic() - Allocate an ID cyclically. |
| 100 | * @idr: IDR handle. | ||
| 101 | * @ptr: Pointer to be associated with the new ID. | ||
| 102 | * @start: The minimum ID (inclusive). | ||
| 103 | * @end: The maximum ID (exclusive). | ||
| 104 | * @gfp: Memory allocation flags. | ||
| 105 | * | ||
| 106 | * Allocates an unused ID in the range specified by @start and @end. If | ||
| 107 | * @end is <= 0, it is treated as one larger than %INT_MAX. This allows | ||
| 108 | * callers to use @start + N as @end as long as N is within integer range. | ||
| 109 | * The search for an unused ID will start at the last ID allocated and will | ||
| 110 | * wrap around to @start if no free IDs are found before reaching @end. | ||
| 111 | * | ||
| 112 | * The caller should provide their own locking to ensure that two | ||
| 113 | * concurrent modifications to the IDR are not possible. Read-only | ||
| 114 | * accesses to the IDR may be done under the RCU read lock or may | ||
| 115 | * exclude simultaneous writers. | ||
| 116 | * | ||
| 117 | * Return: The newly allocated ID, -ENOMEM if memory allocation failed, | ||
| 118 | * or -ENOSPC if no free IDs could be found. | ||
| 119 | */ | ||
| 120 | int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) | ||
| 121 | { | ||
| 122 | u32 id = idr->idr_next; | ||
| 123 | int err, max = end > 0 ? end - 1 : INT_MAX; | ||
| 55 | 124 | ||
| 56 | id = idr_alloc(idr, ptr, curr, end, gfp); | 125 | if ((int)id < start) |
| 57 | if ((id == -ENOSPC) && (curr > start)) | 126 | id = start; |
| 58 | id = idr_alloc(idr, ptr, start, curr, gfp); | ||
| 59 | 127 | ||
| 60 | if (id >= 0) | 128 | err = idr_alloc_u32(idr, ptr, &id, max, gfp); |
| 61 | idr->idr_next = id + 1U; | 129 | if ((err == -ENOSPC) && (id > start)) { |
| 130 | id = start; | ||
| 131 | err = idr_alloc_u32(idr, ptr, &id, max, gfp); | ||
| 132 | } | ||
| 133 | if (err) | ||
| 134 | return err; | ||
| 62 | 135 | ||
| 136 | idr->idr_next = id + 1; | ||
| 63 | return id; | 137 | return id; |
| 64 | } | 138 | } |
| 65 | EXPORT_SYMBOL(idr_alloc_cyclic); | 139 | EXPORT_SYMBOL(idr_alloc_cyclic); |
| 66 | 140 | ||
| 67 | /** | 141 | /** |
| 68 | * idr_for_each - iterate through all stored pointers | 142 | * idr_remove() - Remove an ID from the IDR. |
| 69 | * @idr: idr handle | 143 | * @idr: IDR handle. |
| 70 | * @fn: function to be called for each pointer | 144 | * @id: Pointer ID. |
| 71 | * @data: data passed to callback function | 145 | * |
| 146 | * Removes this ID from the IDR. If the ID was not previously in the IDR, | ||
| 147 | * this function returns %NULL. | ||
| 148 | * | ||
| 149 | * Since this function modifies the IDR, the caller should provide their | ||
| 150 | * own locking to ensure that concurrent modification of the same IDR is | ||
| 151 | * not possible. | ||
| 152 | * | ||
| 153 | * Return: The pointer formerly associated with this ID. | ||
| 154 | */ | ||
| 155 | void *idr_remove(struct idr *idr, unsigned long id) | ||
| 156 | { | ||
| 157 | return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL); | ||
| 158 | } | ||
| 159 | EXPORT_SYMBOL_GPL(idr_remove); | ||
| 160 | |||
| 161 | /** | ||
| 162 | * idr_find() - Return pointer for given ID. | ||
| 163 | * @idr: IDR handle. | ||
| 164 | * @id: Pointer ID. | ||
| 165 | * | ||
| 166 | * Looks up the pointer associated with this ID. A %NULL pointer may | ||
| 167 | * indicate that @id is not allocated or that the %NULL pointer was | ||
| 168 | * associated with this ID. | ||
| 169 | * | ||
| 170 | * This function can be called under rcu_read_lock(), given that the leaf | ||
| 171 | * pointers lifetimes are correctly managed. | ||
| 172 | * | ||
| 173 | * Return: The pointer associated with this ID. | ||
| 174 | */ | ||
| 175 | void *idr_find(const struct idr *idr, unsigned long id) | ||
| 176 | { | ||
| 177 | return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base); | ||
| 178 | } | ||
| 179 | EXPORT_SYMBOL_GPL(idr_find); | ||
| 180 | |||
| 181 | /** | ||
| 182 | * idr_for_each() - Iterate through all stored pointers. | ||
| 183 | * @idr: IDR handle. | ||
| 184 | * @fn: Function to be called for each pointer. | ||
| 185 | * @data: Data passed to callback function. | ||
| 72 | * | 186 | * |
| 73 | * The callback function will be called for each entry in @idr, passing | 187 | * The callback function will be called for each entry in @idr, passing |
| 74 | * the id, the pointer and the data pointer passed to this function. | 188 | * the ID, the entry and @data. |
| 75 | * | 189 | * |
| 76 | * If @fn returns anything other than %0, the iteration stops and that | 190 | * If @fn returns anything other than %0, the iteration stops and that |
| 77 | * value is returned from this function. | 191 | * value is returned from this function. |
| @@ -86,9 +200,14 @@ int idr_for_each(const struct idr *idr, | |||
| 86 | { | 200 | { |
| 87 | struct radix_tree_iter iter; | 201 | struct radix_tree_iter iter; |
| 88 | void __rcu **slot; | 202 | void __rcu **slot; |
| 203 | int base = idr->idr_base; | ||
| 89 | 204 | ||
| 90 | radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { | 205 | radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { |
| 91 | int ret = fn(iter.index, rcu_dereference_raw(*slot), data); | 206 | int ret; |
| 207 | |||
| 208 | if (WARN_ON_ONCE(iter.index > INT_MAX)) | ||
| 209 | break; | ||
| 210 | ret = fn(iter.index + base, rcu_dereference_raw(*slot), data); | ||
| 92 | if (ret) | 211 | if (ret) |
| 93 | return ret; | 212 | return ret; |
| 94 | } | 213 | } |
| @@ -98,9 +217,9 @@ int idr_for_each(const struct idr *idr, | |||
| 98 | EXPORT_SYMBOL(idr_for_each); | 217 | EXPORT_SYMBOL(idr_for_each); |
| 99 | 218 | ||
| 100 | /** | 219 | /** |
| 101 | * idr_get_next - Find next populated entry | 220 | * idr_get_next() - Find next populated entry. |
| 102 | * @idr: idr handle | 221 | * @idr: IDR handle. |
| 103 | * @nextid: Pointer to lowest possible ID to return | 222 | * @nextid: Pointer to an ID. |
| 104 | * | 223 | * |
| 105 | * Returns the next populated entry in the tree with an ID greater than | 224 | * Returns the next populated entry in the tree with an ID greater than |
| 106 | * or equal to the value pointed to by @nextid. On exit, @nextid is updated | 225 | * or equal to the value pointed to by @nextid. On exit, @nextid is updated |
| @@ -111,35 +230,55 @@ void *idr_get_next(struct idr *idr, int *nextid) | |||
| 111 | { | 230 | { |
| 112 | struct radix_tree_iter iter; | 231 | struct radix_tree_iter iter; |
| 113 | void __rcu **slot; | 232 | void __rcu **slot; |
| 233 | int base = idr->idr_base; | ||
| 234 | int id = *nextid; | ||
| 114 | 235 | ||
| 115 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid); | 236 | id = (id < base) ? 0 : id - base; |
| 237 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); | ||
| 116 | if (!slot) | 238 | if (!slot) |
| 117 | return NULL; | 239 | return NULL; |
| 240 | id = iter.index + base; | ||
| 241 | |||
| 242 | if (WARN_ON_ONCE(id > INT_MAX)) | ||
| 243 | return NULL; | ||
| 118 | 244 | ||
| 119 | *nextid = iter.index; | 245 | *nextid = id; |
| 120 | return rcu_dereference_raw(*slot); | 246 | return rcu_dereference_raw(*slot); |
| 121 | } | 247 | } |
| 122 | EXPORT_SYMBOL(idr_get_next); | 248 | EXPORT_SYMBOL(idr_get_next); |
| 123 | 249 | ||
| 124 | void *idr_get_next_ext(struct idr *idr, unsigned long *nextid) | 250 | /** |
| 251 | * idr_get_next_ul() - Find next populated entry. | ||
| 252 | * @idr: IDR handle. | ||
| 253 | * @nextid: Pointer to an ID. | ||
| 254 | * | ||
| 255 | * Returns the next populated entry in the tree with an ID greater than | ||
| 256 | * or equal to the value pointed to by @nextid. On exit, @nextid is updated | ||
| 257 | * to the ID of the found value. To use in a loop, the value pointed to by | ||
| 258 | * nextid must be incremented by the user. | ||
| 259 | */ | ||
| 260 | void *idr_get_next_ul(struct idr *idr, unsigned long *nextid) | ||
| 125 | { | 261 | { |
| 126 | struct radix_tree_iter iter; | 262 | struct radix_tree_iter iter; |
| 127 | void __rcu **slot; | 263 | void __rcu **slot; |
| 264 | unsigned long base = idr->idr_base; | ||
| 265 | unsigned long id = *nextid; | ||
| 128 | 266 | ||
| 129 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid); | 267 | id = (id < base) ? 0 : id - base; |
| 268 | slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); | ||
| 130 | if (!slot) | 269 | if (!slot) |
| 131 | return NULL; | 270 | return NULL; |
| 132 | 271 | ||
| 133 | *nextid = iter.index; | 272 | *nextid = iter.index + base; |
| 134 | return rcu_dereference_raw(*slot); | 273 | return rcu_dereference_raw(*slot); |
| 135 | } | 274 | } |
| 136 | EXPORT_SYMBOL(idr_get_next_ext); | 275 | EXPORT_SYMBOL(idr_get_next_ul); |
| 137 | 276 | ||
| 138 | /** | 277 | /** |
| 139 | * idr_replace - replace pointer for given id | 278 | * idr_replace() - replace pointer for given ID. |
| 140 | * @idr: idr handle | 279 | * @idr: IDR handle. |
| 141 | * @ptr: New pointer to associate with the ID | 280 | * @ptr: New pointer to associate with the ID. |
| 142 | * @id: Lookup key | 281 | * @id: ID to change. |
| 143 | * | 282 | * |
| 144 | * Replace the pointer registered with an ID and return the old value. | 283 | * Replace the pointer registered with an ID and return the old value. |
| 145 | * This function can be called under the RCU read lock concurrently with | 284 | * This function can be called under the RCU read lock concurrently with |
| @@ -147,18 +286,9 @@ EXPORT_SYMBOL(idr_get_next_ext); | |||
| 147 | * the one being replaced!). | 286 | * the one being replaced!). |
| 148 | * | 287 | * |
| 149 | * Returns: the old value on success. %-ENOENT indicates that @id was not | 288 | * Returns: the old value on success. %-ENOENT indicates that @id was not |
| 150 | * found. %-EINVAL indicates that @id or @ptr were not valid. | 289 | * found. %-EINVAL indicates that @ptr was not valid. |
| 151 | */ | 290 | */ |
| 152 | void *idr_replace(struct idr *idr, void *ptr, int id) | 291 | void *idr_replace(struct idr *idr, void *ptr, unsigned long id) |
| 153 | { | ||
| 154 | if (id < 0) | ||
| 155 | return ERR_PTR(-EINVAL); | ||
| 156 | |||
| 157 | return idr_replace_ext(idr, ptr, id); | ||
| 158 | } | ||
| 159 | EXPORT_SYMBOL(idr_replace); | ||
| 160 | |||
| 161 | void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id) | ||
| 162 | { | 292 | { |
| 163 | struct radix_tree_node *node; | 293 | struct radix_tree_node *node; |
| 164 | void __rcu **slot = NULL; | 294 | void __rcu **slot = NULL; |
| @@ -166,6 +296,7 @@ void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id) | |||
| 166 | 296 | ||
| 167 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) | 297 | if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) |
| 168 | return ERR_PTR(-EINVAL); | 298 | return ERR_PTR(-EINVAL); |
| 299 | id -= idr->idr_base; | ||
| 169 | 300 | ||
| 170 | entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); | 301 | entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); |
| 171 | if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) | 302 | if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) |
| @@ -175,7 +306,7 @@ void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id) | |||
| 175 | 306 | ||
| 176 | return entry; | 307 | return entry; |
| 177 | } | 308 | } |
| 178 | EXPORT_SYMBOL(idr_replace_ext); | 309 | EXPORT_SYMBOL(idr_replace); |
| 179 | 310 | ||
| 180 | /** | 311 | /** |
| 181 | * DOC: IDA description | 312 | * DOC: IDA description |
| @@ -235,7 +366,7 @@ EXPORT_SYMBOL(idr_replace_ext); | |||
| 235 | * bitmap, which is excessive. | 366 | * bitmap, which is excessive. |
| 236 | */ | 367 | */ |
| 237 | 368 | ||
| 238 | #define IDA_MAX (0x80000000U / IDA_BITMAP_BITS) | 369 | #define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1) |
| 239 | 370 | ||
| 240 | /** | 371 | /** |
| 241 | * ida_get_new_above - allocate new ID above or equal to a start id | 372 | * ida_get_new_above - allocate new ID above or equal to a start id |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index c8d55565fafa..0a7ae3288a24 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include <linux/bitmap.h> | 25 | #include <linux/bitmap.h> |
| 26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
| 27 | #include <linux/bug.h> | ||
| 27 | #include <linux/cpu.h> | 28 | #include <linux/cpu.h> |
| 28 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
| 29 | #include <linux/export.h> | 30 | #include <linux/export.h> |
| @@ -2135,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) | |||
| 2135 | } | 2136 | } |
| 2136 | EXPORT_SYMBOL(ida_pre_get); | 2137 | EXPORT_SYMBOL(ida_pre_get); |
| 2137 | 2138 | ||
| 2138 | void __rcu **idr_get_free_cmn(struct radix_tree_root *root, | 2139 | void __rcu **idr_get_free(struct radix_tree_root *root, |
| 2139 | struct radix_tree_iter *iter, gfp_t gfp, | 2140 | struct radix_tree_iter *iter, gfp_t gfp, |
| 2140 | unsigned long max) | 2141 | unsigned long max) |
| 2141 | { | 2142 | { |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 52622a3d2517..eba6682727dd 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
| @@ -78,7 +78,7 @@ static void free_tcf(struct tc_action *p) | |||
| 78 | static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p) | 78 | static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p) |
| 79 | { | 79 | { |
| 80 | spin_lock_bh(&idrinfo->lock); | 80 | spin_lock_bh(&idrinfo->lock); |
| 81 | idr_remove_ext(&idrinfo->action_idr, p->tcfa_index); | 81 | idr_remove(&idrinfo->action_idr, p->tcfa_index); |
| 82 | spin_unlock_bh(&idrinfo->lock); | 82 | spin_unlock_bh(&idrinfo->lock); |
| 83 | gen_kill_estimator(&p->tcfa_rate_est); | 83 | gen_kill_estimator(&p->tcfa_rate_est); |
| 84 | free_tcf(p); | 84 | free_tcf(p); |
| @@ -124,7 +124,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, | |||
| 124 | 124 | ||
| 125 | s_i = cb->args[0]; | 125 | s_i = cb->args[0]; |
| 126 | 126 | ||
| 127 | idr_for_each_entry_ext(idr, p, id) { | 127 | idr_for_each_entry_ul(idr, p, id) { |
| 128 | index++; | 128 | index++; |
| 129 | if (index < s_i) | 129 | if (index < s_i) |
| 130 | continue; | 130 | continue; |
| @@ -181,7 +181,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, | |||
| 181 | if (nla_put_string(skb, TCA_KIND, ops->kind)) | 181 | if (nla_put_string(skb, TCA_KIND, ops->kind)) |
| 182 | goto nla_put_failure; | 182 | goto nla_put_failure; |
| 183 | 183 | ||
| 184 | idr_for_each_entry_ext(idr, p, id) { | 184 | idr_for_each_entry_ul(idr, p, id) { |
| 185 | ret = __tcf_idr_release(p, false, true); | 185 | ret = __tcf_idr_release(p, false, true); |
| 186 | if (ret == ACT_P_DELETED) { | 186 | if (ret == ACT_P_DELETED) { |
| 187 | module_put(ops->owner); | 187 | module_put(ops->owner); |
| @@ -222,7 +222,7 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo) | |||
| 222 | struct tc_action *p = NULL; | 222 | struct tc_action *p = NULL; |
| 223 | 223 | ||
| 224 | spin_lock_bh(&idrinfo->lock); | 224 | spin_lock_bh(&idrinfo->lock); |
| 225 | p = idr_find_ext(&idrinfo->action_idr, index); | 225 | p = idr_find(&idrinfo->action_idr, index); |
| 226 | spin_unlock_bh(&idrinfo->lock); | 226 | spin_unlock_bh(&idrinfo->lock); |
| 227 | 227 | ||
| 228 | return p; | 228 | return p; |
| @@ -274,7 +274,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | |||
| 274 | struct tcf_idrinfo *idrinfo = tn->idrinfo; | 274 | struct tcf_idrinfo *idrinfo = tn->idrinfo; |
| 275 | struct idr *idr = &idrinfo->action_idr; | 275 | struct idr *idr = &idrinfo->action_idr; |
| 276 | int err = -ENOMEM; | 276 | int err = -ENOMEM; |
| 277 | unsigned long idr_index; | ||
| 278 | 277 | ||
| 279 | if (unlikely(!p)) | 278 | if (unlikely(!p)) |
| 280 | return -ENOMEM; | 279 | return -ENOMEM; |
| @@ -284,45 +283,28 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, | |||
| 284 | 283 | ||
| 285 | if (cpustats) { | 284 | if (cpustats) { |
| 286 | p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); | 285 | p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); |
| 287 | if (!p->cpu_bstats) { | 286 | if (!p->cpu_bstats) |
| 288 | err1: | ||
| 289 | kfree(p); | ||
| 290 | return err; | ||
| 291 | } | ||
| 292 | p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); | ||
| 293 | if (!p->cpu_qstats) { | ||
| 294 | err2: | ||
| 295 | free_percpu(p->cpu_bstats); | ||
| 296 | goto err1; | 287 | goto err1; |
| 297 | } | 288 | p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); |
| 289 | if (!p->cpu_qstats) | ||
| 290 | goto err2; | ||
| 298 | } | 291 | } |
| 299 | spin_lock_init(&p->tcfa_lock); | 292 | spin_lock_init(&p->tcfa_lock); |
| 293 | idr_preload(GFP_KERNEL); | ||
| 294 | spin_lock_bh(&idrinfo->lock); | ||
| 300 | /* user doesn't specify an index */ | 295 | /* user doesn't specify an index */ |
| 301 | if (!index) { | 296 | if (!index) { |
| 302 | idr_preload(GFP_KERNEL); | 297 | index = 1; |
| 303 | spin_lock_bh(&idrinfo->lock); | 298 | err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC); |
| 304 | err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0, | ||
| 305 | GFP_ATOMIC); | ||
| 306 | spin_unlock_bh(&idrinfo->lock); | ||
| 307 | idr_preload_end(); | ||
| 308 | if (err) { | ||
| 309 | err3: | ||
| 310 | free_percpu(p->cpu_qstats); | ||
| 311 | goto err2; | ||
| 312 | } | ||
| 313 | p->tcfa_index = idr_index; | ||
| 314 | } else { | 299 | } else { |
| 315 | idr_preload(GFP_KERNEL); | 300 | err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC); |
| 316 | spin_lock_bh(&idrinfo->lock); | ||
| 317 | err = idr_alloc_ext(idr, NULL, NULL, index, index + 1, | ||
| 318 | GFP_ATOMIC); | ||
| 319 | spin_unlock_bh(&idrinfo->lock); | ||
| 320 | idr_preload_end(); | ||
| 321 | if (err) | ||
| 322 | goto err3; | ||
| 323 | p->tcfa_index = index; | ||
| 324 | } | 301 | } |
| 302 | spin_unlock_bh(&idrinfo->lock); | ||
| 303 | idr_preload_end(); | ||
| 304 | if (err) | ||
| 305 | goto err3; | ||
| 325 | 306 | ||
| 307 | p->tcfa_index = index; | ||
| 326 | p->tcfa_tm.install = jiffies; | 308 | p->tcfa_tm.install = jiffies; |
| 327 | p->tcfa_tm.lastuse = jiffies; | 309 | p->tcfa_tm.lastuse = jiffies; |
| 328 | p->tcfa_tm.firstuse = 0; | 310 | p->tcfa_tm.firstuse = 0; |
| @@ -330,9 +312,8 @@ err3: | |||
| 330 | err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, | 312 | err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, |
| 331 | &p->tcfa_rate_est, | 313 | &p->tcfa_rate_est, |
| 332 | &p->tcfa_lock, NULL, est); | 314 | &p->tcfa_lock, NULL, est); |
| 333 | if (err) { | 315 | if (err) |
| 334 | goto err3; | 316 | goto err4; |
| 335 | } | ||
| 336 | } | 317 | } |
| 337 | 318 | ||
| 338 | p->idrinfo = idrinfo; | 319 | p->idrinfo = idrinfo; |
| @@ -340,6 +321,15 @@ err3: | |||
| 340 | INIT_LIST_HEAD(&p->list); | 321 | INIT_LIST_HEAD(&p->list); |
| 341 | *a = p; | 322 | *a = p; |
| 342 | return 0; | 323 | return 0; |
| 324 | err4: | ||
| 325 | idr_remove(idr, index); | ||
| 326 | err3: | ||
| 327 | free_percpu(p->cpu_qstats); | ||
| 328 | err2: | ||
| 329 | free_percpu(p->cpu_bstats); | ||
| 330 | err1: | ||
| 331 | kfree(p); | ||
| 332 | return err; | ||
| 343 | } | 333 | } |
| 344 | EXPORT_SYMBOL(tcf_idr_create); | 334 | EXPORT_SYMBOL(tcf_idr_create); |
| 345 | 335 | ||
| @@ -348,7 +338,7 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) | |||
| 348 | struct tcf_idrinfo *idrinfo = tn->idrinfo; | 338 | struct tcf_idrinfo *idrinfo = tn->idrinfo; |
| 349 | 339 | ||
| 350 | spin_lock_bh(&idrinfo->lock); | 340 | spin_lock_bh(&idrinfo->lock); |
| 351 | idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index); | 341 | idr_replace(&idrinfo->action_idr, a, a->tcfa_index); |
| 352 | spin_unlock_bh(&idrinfo->lock); | 342 | spin_unlock_bh(&idrinfo->lock); |
| 353 | } | 343 | } |
| 354 | EXPORT_SYMBOL(tcf_idr_insert); | 344 | EXPORT_SYMBOL(tcf_idr_insert); |
| @@ -361,7 +351,7 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops, | |||
| 361 | int ret; | 351 | int ret; |
| 362 | unsigned long id = 1; | 352 | unsigned long id = 1; |
| 363 | 353 | ||
| 364 | idr_for_each_entry_ext(idr, p, id) { | 354 | idr_for_each_entry_ul(idr, p, id) { |
| 365 | ret = __tcf_idr_release(p, false, true); | 355 | ret = __tcf_idr_release(p, false, true); |
| 366 | if (ret == ACT_P_DELETED) | 356 | if (ret == ACT_P_DELETED) |
| 367 | module_put(ops->owner); | 357 | module_put(ops->owner); |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index bcb4ccb5f894..2bc1bc23d42e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -381,8 +381,8 @@ static int tcf_block_insert(struct tcf_block *block, struct net *net, | |||
| 381 | struct tcf_net *tn = net_generic(net, tcf_net_id); | 381 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 382 | int err; | 382 | int err; |
| 383 | 383 | ||
| 384 | err = idr_alloc_ext(&tn->idr, block, NULL, block_index, | 384 | err = idr_alloc_u32(&tn->idr, block, &block_index, block_index, |
| 385 | block_index + 1, GFP_KERNEL); | 385 | GFP_KERNEL); |
| 386 | if (err) | 386 | if (err) |
| 387 | return err; | 387 | return err; |
| 388 | block->index = block_index; | 388 | block->index = block_index; |
| @@ -393,7 +393,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net) | |||
| 393 | { | 393 | { |
| 394 | struct tcf_net *tn = net_generic(net, tcf_net_id); | 394 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 395 | 395 | ||
| 396 | idr_remove_ext(&tn->idr, block->index); | 396 | idr_remove(&tn->idr, block->index); |
| 397 | } | 397 | } |
| 398 | 398 | ||
| 399 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, | 399 | static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, |
| @@ -434,7 +434,7 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) | |||
| 434 | { | 434 | { |
| 435 | struct tcf_net *tn = net_generic(net, tcf_net_id); | 435 | struct tcf_net *tn = net_generic(net, tcf_net_id); |
| 436 | 436 | ||
| 437 | return idr_find_ext(&tn->idr, block_index); | 437 | return idr_find(&tn->idr, block_index); |
| 438 | } | 438 | } |
| 439 | 439 | ||
| 440 | static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block) | 440 | static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block) |
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index d333f5c5101d..6b7ab3512f5b 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c | |||
| @@ -120,7 +120,7 @@ static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) | |||
| 120 | list_for_each_entry_safe(f, n, &head->flist, link) { | 120 | list_for_each_entry_safe(f, n, &head->flist, link) { |
| 121 | list_del_rcu(&f->link); | 121 | list_del_rcu(&f->link); |
| 122 | tcf_unbind_filter(tp, &f->res); | 122 | tcf_unbind_filter(tp, &f->res); |
| 123 | idr_remove_ext(&head->handle_idr, f->handle); | 123 | idr_remove(&head->handle_idr, f->handle); |
| 124 | if (tcf_exts_get_net(&f->exts)) | 124 | if (tcf_exts_get_net(&f->exts)) |
| 125 | call_rcu(&f->rcu, basic_delete_filter); | 125 | call_rcu(&f->rcu, basic_delete_filter); |
| 126 | else | 126 | else |
| @@ -138,7 +138,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last, | |||
| 138 | 138 | ||
| 139 | list_del_rcu(&f->link); | 139 | list_del_rcu(&f->link); |
| 140 | tcf_unbind_filter(tp, &f->res); | 140 | tcf_unbind_filter(tp, &f->res); |
| 141 | idr_remove_ext(&head->handle_idr, f->handle); | 141 | idr_remove(&head->handle_idr, f->handle); |
| 142 | tcf_exts_get_net(&f->exts); | 142 | tcf_exts_get_net(&f->exts); |
| 143 | call_rcu(&f->rcu, basic_delete_filter); | 143 | call_rcu(&f->rcu, basic_delete_filter); |
| 144 | *last = list_empty(&head->flist); | 144 | *last = list_empty(&head->flist); |
| @@ -185,7 +185,6 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, | |||
| 185 | struct nlattr *tb[TCA_BASIC_MAX + 1]; | 185 | struct nlattr *tb[TCA_BASIC_MAX + 1]; |
| 186 | struct basic_filter *fold = (struct basic_filter *) *arg; | 186 | struct basic_filter *fold = (struct basic_filter *) *arg; |
| 187 | struct basic_filter *fnew; | 187 | struct basic_filter *fnew; |
| 188 | unsigned long idr_index; | ||
| 189 | 188 | ||
| 190 | if (tca[TCA_OPTIONS] == NULL) | 189 | if (tca[TCA_OPTIONS] == NULL) |
| 191 | return -EINVAL; | 190 | return -EINVAL; |
| @@ -208,34 +207,30 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, | |||
| 208 | if (err < 0) | 207 | if (err < 0) |
| 209 | goto errout; | 208 | goto errout; |
| 210 | 209 | ||
| 211 | if (handle) { | 210 | if (!handle) { |
| 212 | fnew->handle = handle; | 211 | handle = 1; |
| 213 | if (!fold) { | 212 | err = idr_alloc_u32(&head->handle_idr, fnew, &handle, |
| 214 | err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, | 213 | INT_MAX, GFP_KERNEL); |
| 215 | handle, handle + 1, GFP_KERNEL); | 214 | } else if (!fold) { |
| 216 | if (err) | 215 | err = idr_alloc_u32(&head->handle_idr, fnew, &handle, |
| 217 | goto errout; | 216 | handle, GFP_KERNEL); |
| 218 | } | ||
| 219 | } else { | ||
| 220 | err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, | ||
| 221 | 1, 0x7FFFFFFF, GFP_KERNEL); | ||
| 222 | if (err) | ||
| 223 | goto errout; | ||
| 224 | fnew->handle = idr_index; | ||
| 225 | } | 217 | } |
| 218 | if (err) | ||
| 219 | goto errout; | ||
| 220 | fnew->handle = handle; | ||
| 226 | 221 | ||
| 227 | err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr, | 222 | err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr, |
| 228 | extack); | 223 | extack); |
| 229 | if (err < 0) { | 224 | if (err < 0) { |
| 230 | if (!fold) | 225 | if (!fold) |
| 231 | idr_remove_ext(&head->handle_idr, fnew->handle); | 226 | idr_remove(&head->handle_idr, fnew->handle); |
| 232 | goto errout; | 227 | goto errout; |
| 233 | } | 228 | } |
| 234 | 229 | ||
| 235 | *arg = fnew; | 230 | *arg = fnew; |
| 236 | 231 | ||
| 237 | if (fold) { | 232 | if (fold) { |
| 238 | idr_replace_ext(&head->handle_idr, fnew, fnew->handle); | 233 | idr_replace(&head->handle_idr, fnew, fnew->handle); |
| 239 | list_replace_rcu(&fold->link, &fnew->link); | 234 | list_replace_rcu(&fold->link, &fnew->link); |
| 240 | tcf_unbind_filter(tp, &fold->res); | 235 | tcf_unbind_filter(tp, &fold->res); |
| 241 | tcf_exts_get_net(&fold->exts); | 236 | tcf_exts_get_net(&fold->exts); |
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 8e5326bc6440..b07c1fa8bc0d 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
| @@ -295,7 +295,7 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog, | |||
| 295 | { | 295 | { |
| 296 | struct cls_bpf_head *head = rtnl_dereference(tp->root); | 296 | struct cls_bpf_head *head = rtnl_dereference(tp->root); |
| 297 | 297 | ||
| 298 | idr_remove_ext(&head->handle_idr, prog->handle); | 298 | idr_remove(&head->handle_idr, prog->handle); |
| 299 | cls_bpf_stop_offload(tp, prog, extack); | 299 | cls_bpf_stop_offload(tp, prog, extack); |
| 300 | list_del_rcu(&prog->link); | 300 | list_del_rcu(&prog->link); |
| 301 | tcf_unbind_filter(tp, &prog->res); | 301 | tcf_unbind_filter(tp, &prog->res); |
| @@ -471,7 +471,6 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
| 471 | struct cls_bpf_prog *oldprog = *arg; | 471 | struct cls_bpf_prog *oldprog = *arg; |
| 472 | struct nlattr *tb[TCA_BPF_MAX + 1]; | 472 | struct nlattr *tb[TCA_BPF_MAX + 1]; |
| 473 | struct cls_bpf_prog *prog; | 473 | struct cls_bpf_prog *prog; |
| 474 | unsigned long idr_index; | ||
| 475 | int ret; | 474 | int ret; |
| 476 | 475 | ||
| 477 | if (tca[TCA_OPTIONS] == NULL) | 476 | if (tca[TCA_OPTIONS] == NULL) |
| @@ -498,21 +497,18 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
| 498 | } | 497 | } |
| 499 | 498 | ||
| 500 | if (handle == 0) { | 499 | if (handle == 0) { |
| 501 | ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index, | 500 | handle = 1; |
| 502 | 1, 0x7FFFFFFF, GFP_KERNEL); | 501 | ret = idr_alloc_u32(&head->handle_idr, prog, &handle, |
| 503 | if (ret) | 502 | INT_MAX, GFP_KERNEL); |
| 504 | goto errout; | 503 | } else if (!oldprog) { |
| 505 | prog->handle = idr_index; | 504 | ret = idr_alloc_u32(&head->handle_idr, prog, &handle, |
| 506 | } else { | 505 | handle, GFP_KERNEL); |
| 507 | if (!oldprog) { | ||
| 508 | ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index, | ||
| 509 | handle, handle + 1, GFP_KERNEL); | ||
| 510 | if (ret) | ||
| 511 | goto errout; | ||
| 512 | } | ||
| 513 | prog->handle = handle; | ||
| 514 | } | 506 | } |
| 515 | 507 | ||
| 508 | if (ret) | ||
| 509 | goto errout; | ||
| 510 | prog->handle = handle; | ||
| 511 | |||
| 516 | ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr, | 512 | ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr, |
| 517 | extack); | 513 | extack); |
| 518 | if (ret < 0) | 514 | if (ret < 0) |
| @@ -526,7 +522,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
| 526 | prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; | 522 | prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; |
| 527 | 523 | ||
| 528 | if (oldprog) { | 524 | if (oldprog) { |
| 529 | idr_replace_ext(&head->handle_idr, prog, handle); | 525 | idr_replace(&head->handle_idr, prog, handle); |
| 530 | list_replace_rcu(&oldprog->link, &prog->link); | 526 | list_replace_rcu(&oldprog->link, &prog->link); |
| 531 | tcf_unbind_filter(tp, &oldprog->res); | 527 | tcf_unbind_filter(tp, &oldprog->res); |
| 532 | tcf_exts_get_net(&oldprog->exts); | 528 | tcf_exts_get_net(&oldprog->exts); |
| @@ -542,7 +538,7 @@ errout_parms: | |||
| 542 | cls_bpf_free_parms(prog); | 538 | cls_bpf_free_parms(prog); |
| 543 | errout_idr: | 539 | errout_idr: |
| 544 | if (!oldprog) | 540 | if (!oldprog) |
| 545 | idr_remove_ext(&head->handle_idr, prog->handle); | 541 | idr_remove(&head->handle_idr, prog->handle); |
| 546 | errout: | 542 | errout: |
| 547 | tcf_exts_destroy(&prog->exts); | 543 | tcf_exts_destroy(&prog->exts); |
| 548 | kfree(prog); | 544 | kfree(prog); |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index dc9acaafc0a8..7d0ce2c40f93 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
| @@ -288,7 +288,7 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, | |||
| 288 | { | 288 | { |
| 289 | struct cls_fl_head *head = rtnl_dereference(tp->root); | 289 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
| 290 | 290 | ||
| 291 | idr_remove_ext(&head->handle_idr, f->handle); | 291 | idr_remove(&head->handle_idr, f->handle); |
| 292 | list_del_rcu(&f->list); | 292 | list_del_rcu(&f->list); |
| 293 | if (!tc_skip_hw(f->flags)) | 293 | if (!tc_skip_hw(f->flags)) |
| 294 | fl_hw_destroy_filter(tp, f, extack); | 294 | fl_hw_destroy_filter(tp, f, extack); |
| @@ -334,7 +334,7 @@ static void *fl_get(struct tcf_proto *tp, u32 handle) | |||
| 334 | { | 334 | { |
| 335 | struct cls_fl_head *head = rtnl_dereference(tp->root); | 335 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
| 336 | 336 | ||
| 337 | return idr_find_ext(&head->handle_idr, handle); | 337 | return idr_find(&head->handle_idr, handle); |
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { | 340 | static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { |
| @@ -865,7 +865,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 865 | struct cls_fl_filter *fnew; | 865 | struct cls_fl_filter *fnew; |
| 866 | struct nlattr **tb; | 866 | struct nlattr **tb; |
| 867 | struct fl_flow_mask mask = {}; | 867 | struct fl_flow_mask mask = {}; |
| 868 | unsigned long idr_index; | ||
| 869 | int err; | 868 | int err; |
| 870 | 869 | ||
| 871 | if (!tca[TCA_OPTIONS]) | 870 | if (!tca[TCA_OPTIONS]) |
| @@ -896,21 +895,17 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 896 | goto errout; | 895 | goto errout; |
| 897 | 896 | ||
| 898 | if (!handle) { | 897 | if (!handle) { |
| 899 | err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, | 898 | handle = 1; |
| 900 | 1, 0x80000000, GFP_KERNEL); | 899 | err = idr_alloc_u32(&head->handle_idr, fnew, &handle, |
| 901 | if (err) | 900 | INT_MAX, GFP_KERNEL); |
| 902 | goto errout; | 901 | } else if (!fold) { |
| 903 | fnew->handle = idr_index; | 902 | /* user specifies a handle and it doesn't exist */ |
| 904 | } | 903 | err = idr_alloc_u32(&head->handle_idr, fnew, &handle, |
| 905 | 904 | handle, GFP_KERNEL); | |
| 906 | /* user specifies a handle and it doesn't exist */ | ||
| 907 | if (handle && !fold) { | ||
| 908 | err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index, | ||
| 909 | handle, handle + 1, GFP_KERNEL); | ||
| 910 | if (err) | ||
| 911 | goto errout; | ||
| 912 | fnew->handle = idr_index; | ||
| 913 | } | 905 | } |
| 906 | if (err) | ||
| 907 | goto errout; | ||
| 908 | fnew->handle = handle; | ||
| 914 | 909 | ||
| 915 | if (tb[TCA_FLOWER_FLAGS]) { | 910 | if (tb[TCA_FLOWER_FLAGS]) { |
| 916 | fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); | 911 | fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); |
| @@ -966,8 +961,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 966 | *arg = fnew; | 961 | *arg = fnew; |
| 967 | 962 | ||
| 968 | if (fold) { | 963 | if (fold) { |
| 969 | fnew->handle = handle; | 964 | idr_replace(&head->handle_idr, fnew, fnew->handle); |
| 970 | idr_replace_ext(&head->handle_idr, fnew, fnew->handle); | ||
| 971 | list_replace_rcu(&fold->list, &fnew->list); | 965 | list_replace_rcu(&fold->list, &fnew->list); |
| 972 | tcf_unbind_filter(tp, &fold->res); | 966 | tcf_unbind_filter(tp, &fold->res); |
| 973 | tcf_exts_get_net(&fold->exts); | 967 | tcf_exts_get_net(&fold->exts); |
| @@ -981,7 +975,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
| 981 | 975 | ||
| 982 | errout_idr: | 976 | errout_idr: |
| 983 | if (fnew->handle) | 977 | if (fnew->handle) |
| 984 | idr_remove_ext(&head->handle_idr, fnew->handle); | 978 | idr_remove(&head->handle_idr, fnew->handle); |
| 985 | errout: | 979 | errout: |
| 986 | tcf_exts_destroy(&fnew->exts); | 980 | tcf_exts_destroy(&fnew->exts); |
| 987 | kfree(fnew); | 981 | kfree(fnew); |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 6311a548046b..c07e8abc3d52 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -316,19 +316,13 @@ static void *u32_get(struct tcf_proto *tp, u32 handle) | |||
| 316 | return u32_lookup_key(ht, handle); | 316 | return u32_lookup_key(ht, handle); |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | /* Protected by rtnl lock */ | ||
| 319 | static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr) | 320 | static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr) |
| 320 | { | 321 | { |
| 321 | unsigned long idr_index; | 322 | int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL); |
| 322 | int err; | 323 | if (id < 0) |
| 323 | |||
| 324 | /* This is only used inside rtnl lock it is safe to increment | ||
| 325 | * without read _copy_ update semantics | ||
| 326 | */ | ||
| 327 | err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index, | ||
| 328 | 1, 0x7FF, GFP_KERNEL); | ||
| 329 | if (err) | ||
| 330 | return 0; | 324 | return 0; |
| 331 | return (u32)(idr_index | 0x800) << 20; | 325 | return (id | 0x800U) << 20; |
| 332 | } | 326 | } |
| 333 | 327 | ||
| 334 | static struct hlist_head *tc_u_common_hash; | 328 | static struct hlist_head *tc_u_common_hash; |
| @@ -598,7 +592,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, | |||
| 598 | rtnl_dereference(n->next)); | 592 | rtnl_dereference(n->next)); |
| 599 | tcf_unbind_filter(tp, &n->res); | 593 | tcf_unbind_filter(tp, &n->res); |
| 600 | u32_remove_hw_knode(tp, n, extack); | 594 | u32_remove_hw_knode(tp, n, extack); |
| 601 | idr_remove_ext(&ht->handle_idr, n->handle); | 595 | idr_remove(&ht->handle_idr, n->handle); |
| 602 | if (tcf_exts_get_net(&n->exts)) | 596 | if (tcf_exts_get_net(&n->exts)) |
| 603 | call_rcu(&n->rcu, u32_delete_key_freepf_rcu); | 597 | call_rcu(&n->rcu, u32_delete_key_freepf_rcu); |
| 604 | else | 598 | else |
| @@ -625,7 +619,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, | |||
| 625 | if (phn == ht) { | 619 | if (phn == ht) { |
| 626 | u32_clear_hw_hnode(tp, ht, extack); | 620 | u32_clear_hw_hnode(tp, ht, extack); |
| 627 | idr_destroy(&ht->handle_idr); | 621 | idr_destroy(&ht->handle_idr); |
| 628 | idr_remove_ext(&tp_c->handle_idr, ht->handle); | 622 | idr_remove(&tp_c->handle_idr, ht->handle); |
| 629 | RCU_INIT_POINTER(*hn, ht->next); | 623 | RCU_INIT_POINTER(*hn, ht->next); |
| 630 | kfree_rcu(ht, rcu); | 624 | kfree_rcu(ht, rcu); |
| 631 | return 0; | 625 | return 0; |
| @@ -747,19 +741,17 @@ ret: | |||
| 747 | 741 | ||
| 748 | static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid) | 742 | static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid) |
| 749 | { | 743 | { |
| 750 | unsigned long idr_index; | 744 | u32 index = htid | 0x800; |
| 751 | u32 start = htid | 0x800; | ||
| 752 | u32 max = htid | 0xFFF; | 745 | u32 max = htid | 0xFFF; |
| 753 | u32 min = htid; | ||
| 754 | 746 | ||
| 755 | if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index, | 747 | if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) { |
| 756 | start, max + 1, GFP_KERNEL)) { | 748 | index = htid + 1; |
| 757 | if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index, | 749 | if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, |
| 758 | min + 1, max + 1, GFP_KERNEL)) | 750 | GFP_KERNEL)) |
| 759 | return max; | 751 | index = max; |
| 760 | } | 752 | } |
| 761 | 753 | ||
| 762 | return (u32)idr_index; | 754 | return index; |
| 763 | } | 755 | } |
| 764 | 756 | ||
| 765 | static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { | 757 | static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { |
| @@ -849,7 +841,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c, | |||
| 849 | if (pins->handle == n->handle) | 841 | if (pins->handle == n->handle) |
| 850 | break; | 842 | break; |
| 851 | 843 | ||
| 852 | idr_replace_ext(&ht->handle_idr, n, n->handle); | 844 | idr_replace(&ht->handle_idr, n, n->handle); |
| 853 | RCU_INIT_POINTER(n->next, pins->next); | 845 | RCU_INIT_POINTER(n->next, pins->next); |
| 854 | rcu_assign_pointer(*ins, n); | 846 | rcu_assign_pointer(*ins, n); |
| 855 | } | 847 | } |
| @@ -1010,8 +1002,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 1010 | return -ENOMEM; | 1002 | return -ENOMEM; |
| 1011 | } | 1003 | } |
| 1012 | } else { | 1004 | } else { |
| 1013 | err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL, | 1005 | err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle, |
| 1014 | handle, handle + 1, GFP_KERNEL); | 1006 | handle, GFP_KERNEL); |
| 1015 | if (err) { | 1007 | if (err) { |
| 1016 | kfree(ht); | 1008 | kfree(ht); |
| 1017 | return err; | 1009 | return err; |
| @@ -1027,7 +1019,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 1027 | 1019 | ||
| 1028 | err = u32_replace_hw_hnode(tp, ht, flags, extack); | 1020 | err = u32_replace_hw_hnode(tp, ht, flags, extack); |
| 1029 | if (err) { | 1021 | if (err) { |
| 1030 | idr_remove_ext(&tp_c->handle_idr, handle); | 1022 | idr_remove(&tp_c->handle_idr, handle); |
| 1031 | kfree(ht); | 1023 | kfree(ht); |
| 1032 | return err; | 1024 | return err; |
| 1033 | } | 1025 | } |
| @@ -1067,8 +1059,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 1067 | return -EINVAL; | 1059 | return -EINVAL; |
| 1068 | } | 1060 | } |
| 1069 | handle = htid | TC_U32_NODE(handle); | 1061 | handle = htid | TC_U32_NODE(handle); |
| 1070 | err = idr_alloc_ext(&ht->handle_idr, NULL, NULL, | 1062 | err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle, |
| 1071 | handle, handle + 1, | ||
| 1072 | GFP_KERNEL); | 1063 | GFP_KERNEL); |
| 1073 | if (err) | 1064 | if (err) |
| 1074 | return err; | 1065 | return err; |
| @@ -1163,7 +1154,7 @@ errfree: | |||
| 1163 | #endif | 1154 | #endif |
| 1164 | kfree(n); | 1155 | kfree(n); |
| 1165 | erridr: | 1156 | erridr: |
| 1166 | idr_remove_ext(&ht->handle_idr, handle); | 1157 | idr_remove(&ht->handle_idr, handle); |
| 1167 | return err; | 1158 | return err; |
| 1168 | } | 1159 | } |
| 1169 | 1160 | ||
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 30cd0b296f1a..44ef9eba5a7a 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c | |||
| @@ -153,11 +153,12 @@ void idr_nowait_test(void) | |||
| 153 | idr_destroy(&idr); | 153 | idr_destroy(&idr); |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | void idr_get_next_test(void) | 156 | void idr_get_next_test(int base) |
| 157 | { | 157 | { |
| 158 | unsigned long i; | 158 | unsigned long i; |
| 159 | int nextid; | 159 | int nextid; |
| 160 | DEFINE_IDR(idr); | 160 | DEFINE_IDR(idr); |
| 161 | idr_init_base(&idr, base); | ||
| 161 | 162 | ||
| 162 | int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0}; | 163 | int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0}; |
| 163 | 164 | ||
| @@ -207,6 +208,7 @@ void idr_checks(void) | |||
| 207 | assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); | 208 | assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); |
| 208 | } | 209 | } |
| 209 | assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC); | 210 | assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC); |
| 211 | assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC); | ||
| 210 | 212 | ||
| 211 | idr_for_each(&idr, item_idr_free, &idr); | 213 | idr_for_each(&idr, item_idr_free, &idr); |
| 212 | idr_destroy(&idr); | 214 | idr_destroy(&idr); |
| @@ -214,6 +216,23 @@ void idr_checks(void) | |||
| 214 | 216 | ||
| 215 | assert(idr_is_empty(&idr)); | 217 | assert(idr_is_empty(&idr)); |
| 216 | 218 | ||
| 219 | idr_set_cursor(&idr, INT_MAX - 3UL); | ||
| 220 | for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) { | ||
| 221 | struct item *item; | ||
| 222 | unsigned int id; | ||
| 223 | if (i <= INT_MAX) | ||
| 224 | item = item_create(i, 0); | ||
| 225 | else | ||
| 226 | item = item_create(i - INT_MAX - 1, 0); | ||
| 227 | |||
| 228 | id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL); | ||
| 229 | assert(id == item->index); | ||
| 230 | } | ||
| 231 | |||
| 232 | idr_for_each(&idr, item_idr_free, &idr); | ||
| 233 | idr_destroy(&idr); | ||
| 234 | assert(idr_is_empty(&idr)); | ||
| 235 | |||
| 217 | for (i = 1; i < 10000; i++) { | 236 | for (i = 1; i < 10000; i++) { |
| 218 | struct item *item = item_create(i, 0); | 237 | struct item *item = item_create(i, 0); |
| 219 | assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i); | 238 | assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i); |
| @@ -226,7 +245,9 @@ void idr_checks(void) | |||
| 226 | idr_alloc_test(); | 245 | idr_alloc_test(); |
| 227 | idr_null_test(); | 246 | idr_null_test(); |
| 228 | idr_nowait_test(); | 247 | idr_nowait_test(); |
| 229 | idr_get_next_test(); | 248 | idr_get_next_test(0); |
| 249 | idr_get_next_test(1); | ||
| 250 | idr_get_next_test(4); | ||
| 230 | } | 251 | } |
| 231 | 252 | ||
| 232 | /* | 253 | /* |
| @@ -380,7 +401,7 @@ void ida_check_random(void) | |||
| 380 | do { | 401 | do { |
| 381 | ida_pre_get(&ida, GFP_KERNEL); | 402 | ida_pre_get(&ida, GFP_KERNEL); |
| 382 | err = ida_get_new_above(&ida, bit, &id); | 403 | err = ida_get_new_above(&ida, bit, &id); |
| 383 | } while (err == -ENOMEM); | 404 | } while (err == -EAGAIN); |
| 384 | assert(!err); | 405 | assert(!err); |
| 385 | assert(id == bit); | 406 | assert(id == bit); |
| 386 | } | 407 | } |
| @@ -489,7 +510,7 @@ static void *ida_random_fn(void *arg) | |||
| 489 | 510 | ||
| 490 | void ida_thread_tests(void) | 511 | void ida_thread_tests(void) |
| 491 | { | 512 | { |
| 492 | pthread_t threads[10]; | 513 | pthread_t threads[20]; |
| 493 | int i; | 514 | int i; |
| 494 | 515 | ||
| 495 | for (i = 0; i < ARRAY_SIZE(threads); i++) | 516 | for (i = 0; i < ARRAY_SIZE(threads); i++) |
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h index c3bc3f364f68..426f32f28547 100644 --- a/tools/testing/radix-tree/linux/kernel.h +++ b/tools/testing/radix-tree/linux/kernel.h | |||
| @@ -17,6 +17,4 @@ | |||
| 17 | #define pr_debug printk | 17 | #define pr_debug printk |
| 18 | #define pr_cont printk | 18 | #define pr_cont printk |
| 19 | 19 | ||
| 20 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) | ||
| 21 | |||
| 22 | #endif /* _KERNEL_H */ | 20 | #endif /* _KERNEL_H */ |
