Diffstat (limited to 'lib')
-rw-r--r--	lib/idr.c	31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..8170ace154fb 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -35,7 +35,7 @@
 
 static struct kmem_cache *idr_layer_cache;
 
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
 {
 	struct idr_layer *p;
 	unsigned long flags;
@@ -51,14 +51,14 @@ static struct idr_layer *alloc_layer(struct idr *idp)
 }
 
 /* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	p->ary[0] = idp->id_free;
 	idp->id_free = p;
 	idp->id_free_cnt++;
 }
 
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	unsigned long flags;
 
@@ -66,7 +66,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
 	 * Depends on the return element being zeroed.
 	 */
 	spin_lock_irqsave(&idp->lock, flags);
-	__free_layer(idp, p);
+	__move_to_free_list(idp, p);
 	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
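
[Note] The three helpers renamed above implement the per-idr free list: a spare idr_layer is chained through its ary[0] slot, with idp->id_free as the list head and idp->id_free_cnt tracking its length, all under idp->lock. A minimal user-space sketch of the same intrusive free-list idea (struct layer, struct pool, push_free and pop_free are hypothetical stand-ins, not the kernel types):

struct layer {
	struct layer *ary[1];		/* ary[0] doubles as the free-list link */
};

struct pool {
	struct layer *id_free;		/* head of the free list */
	int id_free_cnt;
};

/* kernel counterpart: __move_to_free_list() */
static void push_free(struct pool *pool, struct layer *l)
{
	l->ary[0] = pool->id_free;
	pool->id_free = l;
	pool->id_free_cnt++;
}

/* kernel counterpart: get_from_free_list(), minus the locking */
static struct layer *pop_free(struct pool *pool)
{
	struct layer *l = pool->id_free;

	if (l) {
		pool->id_free = l->ary[0];
		l->ary[0] = NULL;
		pool->id_free_cnt--;
	}
	return l;
}
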
@@ -109,7 +109,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
-		free_layer(idp, new);
+		move_to_free_list(idp, new);
 	}
 	return 1;
 }
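
[Note] idr_pre_get() only stocks the free list; the actual ID allocation happens later, typically under a caller-held spinlock, and must be retried if another path drained the preloaded layers in the meantime. A sketch of the usual caller pattern for this era of the API (my_idr, my_lock and my_ptr are placeholder names):

	int id, ret;

retry:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;			/* could not preload a layer */
	spin_lock(&my_lock);
	ret = idr_get_new(&my_idr, my_ptr, &id);
	spin_unlock(&my_lock);
	if (ret == -EAGAIN)
		goto retry;			/* preloaded layers were consumed, refill */
	if (ret)
		return ret;
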
@@ -167,7 +167,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 		 * Create the layer below if it is missing.
 		 */
 		if (!p->ary[m]) {
-			if (!(new = alloc_layer(idp)))
+			new = get_from_free_list(idp);
+			if (!new)
 				return -1;
 			p->ary[m] = new;
 			p->count++;
@@ -192,7 +193,7 @@ build_up:
 	p = idp->top;
 	layers = idp->layers;
 	if (unlikely(!p)) {
-		if (!(p = alloc_layer(idp)))
+		if (!(p = get_from_free_list(idp)))
 			return -1;
 		layers = 1;
 	}
@@ -204,7 +205,7 @@ build_up:
 		layers++;
 		if (!p->count)
 			continue;
-		if (!(new = alloc_layer(idp))) {
+		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed. If we built part of
 			 * the structure tear it down.
@@ -214,7 +215,7 @@ build_up:
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
-				__free_layer(idp, new);
+				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
@@ -351,7 +352,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 		__clear_bit(n, &p->bitmap);
 		p->ary[n] = NULL;
 		while(*paa && ! --((**paa)->count)){
-			free_layer(idp, **paa);
+			move_to_free_list(idp, **paa);
 			**paa-- = NULL;
 		}
 		if (!*paa)
@@ -378,12 +379,12 @@ void idr_remove(struct idr *idp, int id)
 
 		p = idp->top->ary[0];
 		idp->top->bitmap = idp->top->count = 0;
-		free_layer(idp, idp->top);
+		move_to_free_list(idp, idp->top);
 		idp->top = p;
 		--idp->layers;
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-		p = alloc_layer(idp);
+		p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
 	}
 	return;
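
[Note] Besides collapsing a redundant top layer, idr_remove() keeps the free list bounded: once id_free_cnt reaches IDR_FREE_MAX, surplus layers are popped off and handed back to the slab cache. Continuing the hypothetical pool/pop_free sketch from above (POOL_FREE_MAX is a made-up stand-in for IDR_FREE_MAX, and free() stands in for kmem_cache_free()):

#include <stdlib.h>

#define POOL_FREE_MAX 8

static void trim_free_list(struct pool *pool)
{
	/* give surplus spare layers back to the underlying allocator */
	while (pool->id_free_cnt >= POOL_FREE_MAX)
		free(pop_free(pool));
}
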
@@ -426,7 +427,7 @@ void idr_remove_all(struct idr *idp)
 		while (n < fls(id)) {
 			if (p) {
 				memset(p, 0, sizeof *p);
-				free_layer(idp, p);
+				move_to_free_list(idp, p);
 			}
 			n += IDR_BITS;
 			p = *--paa;
@@ -444,7 +445,7 @@ EXPORT_SYMBOL(idr_remove_all);
 void idr_destroy(struct idr *idp)
 {
 	while (idp->id_free_cnt) {
-		struct idr_layer *p = alloc_layer(idp);
+		struct idr_layer *p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
 	}
 }
@@ -749,7 +750,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	 * allocation.
 	 */
 	if (ida->idr.id_free_cnt || ida->free_bitmap) {
-		struct idr_layer *p = alloc_layer(&ida->idr);
+		struct idr_layer *p = get_from_free_list(&ida->idr);
 		if (p)
 			kmem_cache_free(idr_layer_cache, p);
 	}
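
[Note] The ida code reuses the idr free list for its preallocation, which is why ida_get_new_above() can find itself holding a spare idr_layer to return after a successful allocation. Callers follow the same preload-then-retry pattern as idr, except that ida hands out bare IDs and can also report ID-space exhaustion; a sketch (my_ida and my_lock are placeholders):

	int id, ret;

retry:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	ret = ida_get_new_above(&my_ida, 0, &id);
	spin_unlock(&my_lock);
	if (ret == -EAGAIN)
		goto retry;			/* preallocated memory was used up */
	if (ret)
		return ret;			/* e.g. -ENOSPC: no IDs left */
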