 lib/idr.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 4d096819511a..16d2143fea48 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -38,14 +38,15 @@ static kmem_cache_t *idr_layer_cache;
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
 	struct idr_layer *p;
+	unsigned long flags;
 
-	spin_lock(&idp->lock);
+	spin_lock_irqsave(&idp->lock, flags);
 	if ((p = idp->id_free)) {
 		idp->id_free = p->ary[0];
 		idp->id_free_cnt--;
 		p->ary[0] = NULL;
 	}
-	spin_unlock(&idp->lock);
+	spin_unlock_irqrestore(&idp->lock, flags);
 	return(p);
 }
 
@@ -59,12 +60,14 @@ static void __free_layer(struct idr *idp, struct idr_layer *p)
 
 static void free_layer(struct idr *idp, struct idr_layer *p)
 {
+	unsigned long flags;
+
 	/*
 	 * Depends on the return element being zeroed.
 	 */
-	spin_lock(&idp->lock);
+	spin_lock_irqsave(&idp->lock, flags);
 	__free_layer(idp, p);
-	spin_unlock(&idp->lock);
+	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
 /**
@@ -168,6 +171,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
+	unsigned long flags;
 
 	id = starting_id;
 build_up:
@@ -191,14 +195,14 @@ build_up:
 			 * The allocation failed.  If we built part of
 			 * the structure tear it down.
 			 */
-			spin_lock(&idp->lock);
+			spin_lock_irqsave(&idp->lock, flags);
 			for (new = p; p && p != idp->top; new = p) {
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
 				__free_layer(idp, new);
 			}
-			spin_unlock(&idp->lock);
+			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
 		}
 		new->ary[0] = p;
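
Every hunk applies the same change: a local "unsigned long flags" is added and the IDR's
internal idp->lock is taken with spin_lock_irqsave()/spin_unlock_irqrestore() instead of
plain spin_lock()/spin_unlock(), so the caller's local interrupt state is saved and
restored around each critical section. Below is a minimal sketch of that general locking
pattern, not code from this patch: the demo_* names are hypothetical and the handler
signature follows the current request_irq() convention. The rule being applied is that a
lock which may also be taken in interrupt context must be acquired with interrupts
disabled in process context, or an interrupt arriving on the same CPU while the lock is
held can spin on it forever.

/* Sketch only: data touched from both process context and a hard-IRQ handler. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

/* Process-context path: save and restore the local interrupt state. */
static void demo_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Hard-IRQ path: interrupts are already disabled here, plain spin_lock suffices. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	spin_lock(&demo_lock);
	demo_count++;
	spin_unlock(&demo_lock);

	return IRQ_HANDLED;
}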