about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMatthew Wilcox <mawilcox@microsoft.com>2017-11-28 15:16:24 -0500
committerMatthew Wilcox <mawilcox@microsoft.com>2018-02-06 16:41:28 -0500
commit460488c58ca8b9167463ac22ec9a2e33db351962 (patch)
treea96e67ae3ef9ae38662ddeda2f208f581ac4691f
parentf730cb93db8e640f95ba4acb339d5732e1721730 (diff)
idr: Remove idr_alloc_ext
It has no more users, so remove it. Move idr_alloc() back into idr.c, move the guts of idr_alloc_cmn() into idr_alloc_u32(), remove the wrappers around idr_get_free_cmn() and rename it to idr_get_free(). While there is now no interface to allocate IDs larger than a u32, the IDR internals remain ready to handle a larger ID should a need arise. These changes make it possible to provide the guarantee that, if the nextid pointer points into the object, the object's ID will be initialised before a concurrent lookup can find the object. Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
-rw-r--r--include/linux/idr.h51
-rw-r--r--include/linux/radix-tree.h17
-rw-r--r--lib/idr.c128
-rw-r--r--lib/radix-tree.c3
-rw-r--r--tools/testing/radix-tree/idr-test.c17
5 files changed, 105 insertions, 111 deletions
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 561a4cbabca6..fa2a04b984df 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,7 +15,6 @@
15#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/bug.h>
19 18
20struct idr { 19struct idr {
21 struct radix_tree_root idr_rt; 20 struct radix_tree_root idr_rt;
@@ -82,55 +81,7 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
82 81
83void idr_preload(gfp_t gfp_mask); 82void idr_preload(gfp_t gfp_mask);
84 83
85int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index, 84int idr_alloc(struct idr *, void *, int start, int end, gfp_t);
86 unsigned long start, unsigned long end, gfp_t gfp,
87 bool ext);
88
89/**
90 * idr_alloc - allocate an id
91 * @idr: idr handle
92 * @ptr: pointer to be associated with the new id
93 * @start: the minimum id (inclusive)
94 * @end: the maximum id (exclusive)
95 * @gfp: memory allocation flags
96 *
97 * Allocates an unused ID in the range [start, end). Returns -ENOSPC
98 * if there are no unused IDs in that range.
99 *
100 * Note that @end is treated as max when <= 0. This is to always allow
101 * using @start + N as @end as long as N is inside integer range.
102 *
103 * Simultaneous modifications to the @idr are not allowed and should be
104 * prevented by the user, usually with a lock. idr_alloc() may be called
105 * concurrently with read-only accesses to the @idr, such as idr_find() and
106 * idr_for_each_entry().
107 */
108static inline int idr_alloc(struct idr *idr, void *ptr,
109 int start, int end, gfp_t gfp)
110{
111 unsigned long id;
112 int ret;
113
114 if (WARN_ON_ONCE(start < 0))
115 return -EINVAL;
116
117 ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
118
119 if (ret)
120 return ret;
121
122 return id;
123}
124
125static inline int idr_alloc_ext(struct idr *idr, void *ptr,
126 unsigned long *index,
127 unsigned long start,
128 unsigned long end,
129 gfp_t gfp)
130{
131 return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
132}
133
134int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *nextid, 85int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *nextid,
135 unsigned long max, gfp_t); 86 unsigned long max, gfp_t);
136int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); 87int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 23a9c89c7ad9..fc55ff31eca7 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -356,24 +356,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
356int radix_tree_join(struct radix_tree_root *, unsigned long index, 356int radix_tree_join(struct radix_tree_root *, unsigned long index,
357 unsigned new_order, void *); 357 unsigned new_order, void *);
358 358
359void __rcu **idr_get_free_cmn(struct radix_tree_root *root, 359void __rcu **idr_get_free(struct radix_tree_root *root,
360 struct radix_tree_iter *iter, gfp_t gfp, 360 struct radix_tree_iter *iter, gfp_t gfp,
361 unsigned long max); 361 unsigned long max);
362static inline void __rcu **idr_get_free(struct radix_tree_root *root,
363 struct radix_tree_iter *iter,
364 gfp_t gfp,
365 int end)
366{
367 return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
368}
369
370static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
371 struct radix_tree_iter *iter,
372 gfp_t gfp,
373 unsigned long end)
374{
375 return idr_get_free_cmn(root, iter, gfp, end - 1);
376}
377 362
378enum { 363enum {
379 RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ 364 RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
diff --git a/lib/idr.c b/lib/idr.c
index 6d4ec2c2982b..c3f16307b908 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1,4 +1,5 @@
1#include <linux/bitmap.h> 1#include <linux/bitmap.h>
2#include <linux/bug.h>
2#include <linux/export.h> 3#include <linux/export.h>
3#include <linux/idr.h> 4#include <linux/idr.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
@@ -17,7 +18,9 @@ static DEFINE_SPINLOCK(simple_ida_lock);
17 * 18 *
18 * Allocates an unused ID in the range specified by @nextid and @max. 19 * Allocates an unused ID in the range specified by @nextid and @max.
19 * Note that @max is inclusive whereas the @end parameter to idr_alloc() 20 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
20 * is exclusive. 21 * is exclusive. The new ID is assigned to @nextid before the pointer
22 * is inserted into the IDR, so if @nextid points into the object pointed
23 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
21 * 24 *
22 * The caller should provide their own locking to ensure that two 25 * The caller should provide their own locking to ensure that two
23 * concurrent modifications to the IDR are not possible. Read-only 26 * concurrent modifications to the IDR are not possible. Read-only
@@ -31,66 +34,103 @@ static DEFINE_SPINLOCK(simple_ida_lock);
31int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid, 34int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
32 unsigned long max, gfp_t gfp) 35 unsigned long max, gfp_t gfp)
33{ 36{
34 unsigned long tmp = *nextid;
35 int ret = idr_alloc_ext(idr, ptr, &tmp, tmp, max + 1, gfp);
36 *nextid = tmp;
37 return ret;
38}
39EXPORT_SYMBOL_GPL(idr_alloc_u32);
40
41int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
42 unsigned long start, unsigned long end, gfp_t gfp,
43 bool ext)
44{
45 struct radix_tree_iter iter; 37 struct radix_tree_iter iter;
46 void __rcu **slot; 38 void __rcu **slot;
47 39
48 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) 40 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
49 return -EINVAL; 41 return -EINVAL;
42 if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
43 idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
50 44
51 radix_tree_iter_init(&iter, start); 45 radix_tree_iter_init(&iter, *nextid);
52 if (ext) 46 slot = idr_get_free(&idr->idr_rt, &iter, gfp, max);
53 slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end);
54 else
55 slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
56 if (IS_ERR(slot)) 47 if (IS_ERR(slot))
57 return PTR_ERR(slot); 48 return PTR_ERR(slot);
58 49
50 *nextid = iter.index;
51 /* there is a memory barrier inside radix_tree_iter_replace() */
59 radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); 52 radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
60 radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); 53 radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
61 54
62 if (index)
63 *index = iter.index;
64 return 0; 55 return 0;
65} 56}
66EXPORT_SYMBOL_GPL(idr_alloc_cmn); 57EXPORT_SYMBOL_GPL(idr_alloc_u32);
67 58
68/** 59/**
69 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion 60 * idr_alloc() - Allocate an ID.
70 * @idr: idr handle 61 * @idr: IDR handle.
71 * @ptr: pointer to be associated with the new id 62 * @ptr: Pointer to be associated with the new ID.
72 * @start: the minimum id (inclusive) 63 * @start: The minimum ID (inclusive).
73 * @end: the maximum id (exclusive) 64 * @end: The maximum ID (exclusive).
74 * @gfp: memory allocation flags 65 * @gfp: Memory allocation flags.
75 * 66 *
76 * Allocates an ID larger than the last ID allocated if one is available. 67 * Allocates an unused ID in the range specified by @start and @end. If
77 * If not, it will attempt to allocate the smallest ID that is larger or 68 * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
78 * equal to @start. 69 * callers to use @start + N as @end as long as N is within integer range.
70 *
71 * The caller should provide their own locking to ensure that two
72 * concurrent modifications to the IDR are not possible. Read-only
73 * accesses to the IDR may be done under the RCU read lock or may
74 * exclude simultaneous writers.
75 *
76 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
77 * or -ENOSPC if no free IDs could be found.
79 */ 78 */
80int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) 79int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
81{ 80{
82 int id, curr = idr->idr_next; 81 u32 id = start;
82 int ret;
83
84 if (WARN_ON_ONCE(start < 0))
85 return -EINVAL;
83 86
84 if (curr < start) 87 ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
85 curr = start; 88 if (ret)
89 return ret;
86 90
87 id = idr_alloc(idr, ptr, curr, end, gfp); 91 return id;
88 if ((id == -ENOSPC) && (curr > start)) 92}
89 id = idr_alloc(idr, ptr, start, curr, gfp); 93EXPORT_SYMBOL_GPL(idr_alloc);
90 94
91 if (id >= 0) 95/**
92 idr->idr_next = id + 1U; 96 * idr_alloc_cyclic() - Allocate an ID cyclically.
97 * @idr: IDR handle.
98 * @ptr: Pointer to be associated with the new ID.
99 * @start: The minimum ID (inclusive).
100 * @end: The maximum ID (exclusive).
101 * @gfp: Memory allocation flags.
102 *
103 * Allocates an unused ID in the range specified by @nextid and @end. If
104 * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
105 * callers to use @start + N as @end as long as N is within integer range.
106 * The search for an unused ID will start at the last ID allocated and will
107 * wrap around to @start if no free IDs are found before reaching @end.
108 *
109 * The caller should provide their own locking to ensure that two
110 * concurrent modifications to the IDR are not possible. Read-only
111 * accesses to the IDR may be done under the RCU read lock or may
112 * exclude simultaneous writers.
113 *
114 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
115 * or -ENOSPC if no free IDs could be found.
116 */
117int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
118{
119 u32 id = idr->idr_next;
120 int err, max = end > 0 ? end - 1 : INT_MAX;
93 121
122 if ((int)id < start)
123 id = start;
124
125 err = idr_alloc_u32(idr, ptr, &id, max, gfp);
126 if ((err == -ENOSPC) && (id > start)) {
127 id = start;
128 err = idr_alloc_u32(idr, ptr, &id, max, gfp);
129 }
130 if (err)
131 return err;
132
133 idr->idr_next = id + 1;
94 return id; 134 return id;
95} 135}
96EXPORT_SYMBOL(idr_alloc_cyclic); 136EXPORT_SYMBOL(idr_alloc_cyclic);
@@ -167,10 +207,10 @@ void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
167EXPORT_SYMBOL(idr_get_next_ext); 207EXPORT_SYMBOL(idr_get_next_ext);
168 208
169/** 209/**
170 * idr_replace - replace pointer for given id 210 * idr_replace() - replace pointer for given ID.
171 * @idr: idr handle 211 * @idr: IDR handle.
172 * @ptr: New pointer to associate with the ID 212 * @ptr: New pointer to associate with the ID.
173 * @id: Lookup key 213 * @id: ID to change.
174 * 214 *
175 * Replace the pointer registered with an ID and return the old value. 215 * Replace the pointer registered with an ID and return the old value.
176 * This function can be called under the RCU read lock concurrently with 216 * This function can be called under the RCU read lock concurrently with
@@ -257,7 +297,7 @@ EXPORT_SYMBOL(idr_replace);
257 * bitmap, which is excessive. 297 * bitmap, which is excessive.
258 */ 298 */
259 299
260#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS) 300#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
261 301
262/** 302/**
263 * ida_get_new_above - allocate new ID above or equal to a start id 303 * ida_get_new_above - allocate new ID above or equal to a start id
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c8d55565fafa..0a7ae3288a24 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/bitmap.h> 25#include <linux/bitmap.h>
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/bug.h>
27#include <linux/cpu.h> 28#include <linux/cpu.h>
28#include <linux/errno.h> 29#include <linux/errno.h>
29#include <linux/export.h> 30#include <linux/export.h>
@@ -2135,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
2135} 2136}
2136EXPORT_SYMBOL(ida_pre_get); 2137EXPORT_SYMBOL(ida_pre_get);
2137 2138
2138void __rcu **idr_get_free_cmn(struct radix_tree_root *root, 2139void __rcu **idr_get_free(struct radix_tree_root *root,
2139 struct radix_tree_iter *iter, gfp_t gfp, 2140 struct radix_tree_iter *iter, gfp_t gfp,
2140 unsigned long max) 2141 unsigned long max)
2141{ 2142{
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 892ef8855b02..36437ade429c 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -215,6 +215,23 @@ void idr_checks(void)
215 215
216 assert(idr_is_empty(&idr)); 216 assert(idr_is_empty(&idr));
217 217
218 idr_set_cursor(&idr, INT_MAX - 3UL);
219 for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
220 struct item *item;
221 unsigned int id;
222 if (i <= INT_MAX)
223 item = item_create(i, 0);
224 else
225 item = item_create(i - INT_MAX - 1, 0);
226
227 id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
228 assert(id == item->index);
229 }
230
231 idr_for_each(&idr, item_idr_free, &idr);
232 idr_destroy(&idr);
233 assert(idr_is_empty(&idr));
234
218 for (i = 1; i < 10000; i++) { 235 for (i = 1; i < 10000; i++) {
219 struct item *item = item_create(i, 0); 236 struct item *item = item_create(i, 0);
220 assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i); 237 assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);