author		Tejun Heo <tj@kernel.org>	2013-02-27 20:03:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 22:10:14 -0500
commit		12d1b4393e0d8df36b2646a5e512f0513fb532d2 (patch)
tree		a705b06b3d0ece29b0f34fedba87d18543ac1b80 /lib
parent		49038ef4fbe2842bd4d8338f89ec5c9ba71b0ae1 (diff)
idr: remove _idr_rc_to_errno() hack
idr uses -1, IDR_NEED_TO_GROW and IDR_NOMORE_SPACE to communicate exception conditions internally.  The return value is later translated to errno values using _idr_rc_to_errno().

This is confusing.  Drop the custom ones and consistently use -EAGAIN for "tree needs to grow", -ENOMEM for "need more memory" and -ENOSPC for "ran out of ID space".

Due to the weird memory preloading mechanism, [ra]_get_new*() return -EAGAIN on memory shortage, so we need to substitute -ENOMEM w/ -EAGAIN on those interface functions.  They'll eventually be cleaned up and the translations will go away.

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
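As context for the -ENOMEM w/ -EAGAIN substitution described above (not part of this patch): a sketch of the caller-side preload/retry pattern that idr_pre_get()/idr_get_new_above() assume, with illustrative names my_idr, my_lock and my_ptr.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

/* Illustrative helper, not from lib/idr.c: allocate an id >= 1 for my_ptr. */
static int alloc_my_id(struct idr *my_idr, spinlock_t *my_lock, void *my_ptr, int *id)
{
	int ret;

again:
	/* Preallocate idr_layers outside the lock; 0 means nothing could be reserved. */
	if (!idr_pre_get(my_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(my_lock);
	ret = idr_get_new_above(my_idr, my_ptr, 1, id);
	spin_unlock(my_lock);

	/* A memory shortage is reported as -EAGAIN at this interface, so refill and retry. */
	if (ret == -EAGAIN)
		goto again;
	return ret;
}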
Diffstat (limited to 'lib')
-rw-r--r--	lib/idr.c	35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 282841b5a561..bde6eecb0e87 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -133,6 +133,21 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_pre_get);
 
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth.  Returns
+ *
+ *  the allocated id >= 0 if successful,
+ *  -EAGAIN if the tree needs to grow for allocation to succeed,
+ *  -ENOSPC if the id space is exhausted,
+ *  -ENOMEM if more idr_layers need to be allocated.
+ */
 static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 {
 	int n, m, sh;
@@ -161,7 +176,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			/* if already at the top layer, we need to grow */
 			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
-				return IDR_NEED_TO_GROW;
+				return -EAGAIN;
 			}
 			p = pa[l];
 			BUG_ON(!p);
@@ -180,7 +195,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
 		if ((id >= MAX_IDR_BIT) || (id < 0))
-			return IDR_NOMORE_SPACE;
+			return -ENOSPC;
 		if (l == 0)
 			break;
 		/*
@@ -189,7 +204,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 		if (!p->ary[m]) {
 			new = get_from_free_list(idp);
 			if (!new)
-				return -1;
+				return -ENOMEM;
 			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
@@ -215,7 +230,7 @@ build_up:
 	layers = idp->layers;
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
-			return -1;
+			return -ENOMEM;
 		p->layer = 0;
 		layers = 1;
 	}
@@ -246,7 +261,7 @@ build_up:
 				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
-			return -1;
+			return -ENOMEM;
 		}
 		new->ary[0] = p;
 		new->count = 1;
@@ -258,7 +273,7 @@ build_up:
 	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
 	v = sub_alloc(idp, &id, pa);
-	if (v == IDR_NEED_TO_GROW)
+	if (v == -EAGAIN)
 		goto build_up;
 	return(v);
 }
@@ -306,12 +321,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	int rv;
 
 	rv = idr_get_new_above_int(idp, ptr, starting_id);
-	/*
-	 * This is a cheap hack until the IDR code can be fixed to
-	 * return proper error values.
-	 */
 	if (rv < 0)
-		return _idr_rc_to_errno(rv);
+		return rv == -ENOMEM ? -EAGAIN : rv;
 	*id = rv;
 	return 0;
 }
@@ -766,7 +777,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	/* get vacant slot */
 	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
 	if (t < 0)
-		return _idr_rc_to_errno(t);
+		return t == -ENOMEM ? -EAGAIN : t;
 
 	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
 		return -ENOSPC;
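The last hunk applies the same -ENOMEM w/ -EAGAIN substitution to ida_get_new_above(); a matching caller sketch under the same preload convention (my_ida and my_lock are illustrative names, not part of this patch):

/* Illustrative helper: allocate an id >= 1 from an ida, retrying on -EAGAIN. */
static int alloc_my_ida_id(struct ida *my_ida, spinlock_t *my_lock, int *id)
{
	int ret;

	do {
		if (!ida_pre_get(my_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(my_lock);
		ret = ida_get_new_above(my_ida, 1, id);
		spin_unlock(my_lock);
	} while (ret == -EAGAIN);

	return ret;
}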