diff options
-rw-r--r--  include/linux/idr.h |  6
-rw-r--r--  lib/idr.c           | 35
2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/include/linux/idr.h b/include/linux/idr.h
index ff44bc83f3cb..837f152b1383 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -70,12 +70,6 @@ struct idr {
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
-/* Actions to be taken after a call to _idr_sub_alloc */
-#define IDR_NEED_TO_GROW -2
-#define IDR_NOMORE_SPACE -3
-
-#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
-
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -133,6 +133,21 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_pre_get);
 
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth.  Returns
+ *
+ *  the allocated id >= 0 if successful,
+ *  -EAGAIN if the tree needs to grow for allocation to succeed,
+ *  -ENOSPC if the id space is exhausted,
+ *  -ENOMEM if more idr_layers need to be allocated.
+ */
 static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 {
 	int n, m, sh;
@@ -161,7 +176,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			/* if already at the top layer, we need to grow */
 			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
-				return IDR_NEED_TO_GROW;
+				return -EAGAIN;
 			}
 			p = pa[l];
 			BUG_ON(!p);
@@ -180,7 +195,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
 		if ((id >= MAX_IDR_BIT) || (id < 0))
-			return IDR_NOMORE_SPACE;
+			return -ENOSPC;
 		if (l == 0)
 			break;
 		/*
@@ -189,7 +204,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 		if (!p->ary[m]) {
 			new = get_from_free_list(idp);
 			if (!new)
-				return -1;
+				return -ENOMEM;
 			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
@@ -215,7 +230,7 @@ build_up:
 	layers = idp->layers;
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
-			return -1;
+			return -ENOMEM;
 		p->layer = 0;
 		layers = 1;
 	}
@@ -246,7 +261,7 @@ build_up:
 				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
-			return -1;
+			return -ENOMEM;
 		}
 		new->ary[0] = p;
 		new->count = 1;
@@ -258,7 +273,7 @@ build_up:
 	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
 	v = sub_alloc(idp, &id, pa);
-	if (v == IDR_NEED_TO_GROW)
+	if (v == -EAGAIN)
 		goto build_up;
 	return(v);
 }
@@ -306,12 +321,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | |||
306 | int rv; | 321 | int rv; |
307 | 322 | ||
308 | rv = idr_get_new_above_int(idp, ptr, starting_id); | 323 | rv = idr_get_new_above_int(idp, ptr, starting_id); |
309 | /* | ||
310 | * This is a cheap hack until the IDR code can be fixed to | ||
311 | * return proper error values. | ||
312 | */ | ||
313 | if (rv < 0) | 324 | if (rv < 0) |
314 | return _idr_rc_to_errno(rv); | 325 | return rv == -ENOMEM ? -EAGAIN : rv; |
315 | *id = rv; | 326 | *id = rv; |
316 | return 0; | 327 | return 0; |
317 | } | 328 | } |
@@ -766,7 +777,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	/* get vacant slot */
 	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
 	if (t < 0)
-		return _idr_rc_to_errno(t);
+		return t == -ENOMEM ? -EAGAIN : t;
 
 	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
 		return -ENOSPC;