about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--  lib/idr.c  16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index d226259c3c28..de19030a999b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -48,15 +48,21 @@ static struct idr_layer *alloc_layer(struct idr *idp)
 	return(p);
 }
 
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+	p->ary[0] = idp->id_free;
+	idp->id_free = p;
+	idp->id_free_cnt++;
+}
+
 static void free_layer(struct idr *idp, struct idr_layer *p)
 {
 	/*
 	 * Depends on the return element being zeroed.
 	 */
 	spin_lock(&idp->lock);
-	p->ary[0] = idp->id_free;
-	idp->id_free = p;
-	idp->id_free_cnt++;
+	__free_layer(idp, p);
 	spin_unlock(&idp->lock);
 }
 
@@ -184,12 +190,14 @@ build_up:
 		 * The allocation failed. If we built part of
		 * the structure tear it down.
 		 */
+		spin_lock(&idp->lock);
 		for (new = p; p && p != idp->top; new = p) {
 			p = p->ary[0];
 			new->ary[0] = NULL;
 			new->bitmap = new->count = 0;
-			free_layer(idp, new);
+			__free_layer(idp, new);
 		}
+		spin_unlock(&idp->lock);
 		return -1;
 	}
 	new->ary[0] = p;