Diffstat (limited to 'lib/idr.c')
-rw-r--r--  lib/idr.c | 50 +++++++++++++++++++++++++++++++-------------------
1 files changed, 31 insertions, 19 deletions
diff --git a/lib/idr.c b/lib/idr.c
index 6415d053e2bf..d226259c3c28 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,20 +6,20 @@
  * Modified by George Anzinger to reuse immediately and to use
  * find bit instructions. Also removed _irq on spinlocks.
  *
  * Small id to pointer translation service.
  *
  * It uses a radix tree like structure as a sparse array indexed
  * by the id to obtain the pointer. The bitmap makes allocating
  * a new id quick.
  *
  * You call it to allocate an id (an int) an associate with that id a
  * pointer or what ever, we treat it as a (void *). You can pass this
  * id to a user for him to pass back at a later time. You then pass
  * that id to this code and it returns your pointer.
 
  * You can release ids at any time. When all ids are released, most of
  * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
  * don't need to go to the memory "store" during an id allocate, just
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
  */
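The header comment above describes the intended calling pattern. A rough sketch of that pattern against this era's two-step API, where my_idr, my_object, and register_object are hypothetical names, not code from this patch:

        /* Hypothetical user of the id-to-pointer service described above.
         * my_idr is set up once elsewhere with idr_init(&my_idr). */
        struct my_object;
        static struct idr my_idr;

        int register_object(struct my_object *obj)
        {
                int id, err;

                /* Refill the layer pool; 0 means the system is really OOM. */
                if (!idr_pre_get(&my_idr, GFP_KERNEL))
                        return -ENOMEM;
                err = idr_get_new(&my_idr, obj, &id);  /* associate obj with id */
                if (err)
                        return err;
                return id;      /* idr_find(&my_idr, id) maps it back later */
        }

Releasing an id is idr_remove(&my_idr, id); as the comment notes, freed layers stay in a small local pool (up to IDR_FREE_MAX) rather than going straight back to the slab allocator.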
@@ -72,12 +72,12 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
  * If the system is REALLY out of memory this function returns 0,
  * otherwise 1.
  */
-int idr_pre_get(struct idr *idp, unsigned gfp_mask)
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
         while (idp->id_free_cnt < IDR_FREE_MAX) {
                 struct idr_layer *new;
                 new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
-                if(new == NULL)
+                if (new == NULL)
                         return (0);
                 free_layer(idp, new);
         }
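Aside from the gfp_t annotation, behavior is unchanged: idr_pre_get() only tops up the preallocated layer pool to IDR_FREE_MAX and returns 0/1 rather than an errno, so callers treat it as a boolean. A minimal sketch (my_idr is the hypothetical idr from above):

        /* Sketch: top up the pool before an allocation attempt.
         * GFP_KERNEL may sleep; atomic contexts would pass GFP_ATOMIC. */
        if (!idr_pre_get(&my_idr, GFP_KERNEL))
                return -ENOMEM;         /* really out of memory */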
@@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
                 if (m == IDR_SIZE) {
                         /* no space available go back to previous layer. */
                         l++;
-                        id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
+                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
                         if (!(p = pa[l])) {
                                 *starting_id = id;
                                 return -2;
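The re-spaced expression is the id round-up step: when a subtree is full, set every id bit covered by the layers below l and add one, landing on the first id of the next layer-l subtree. For example, with IDR_BITS = 5 (an assumption; the value depends on BITS_PER_LONG) and l = 1, id = 37 gives (37 | 31) + 1 = 64. A standalone userspace check:

        #include <stdio.h>

        #define IDR_BITS 5      /* assumed 32-bit value; 64-bit kernels use 6 */

        int main(void)
        {
                int id = 37, l = 1;

                /* Skip to the first id served by the next layer-l subtree. */
                id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
                printf("%d\n", id);     /* prints 64 */
                return 0;
        }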
@@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
         struct idr_layer *p, *new;
         int layers, v, id;
 
         id = starting_id;
 build_up:
         p = idp->top;
@@ -225,6 +225,7 @@ build_up:
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
         int rv;
+
         rv = idr_get_new_above_int(idp, ptr, starting_id);
         /*
          * This is a cheap hack until the IDR code can be fixed to
@@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above);
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
         int rv;
+
         rv = idr_get_new_above_int(idp, ptr, 0);
         /*
          * This is a cheap hack until the IDR code can be fixed to
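This and the previous hunk add the same readability blank line; both wrappers carry the "cheap hack" comment because they translate idr_get_new_above_int()'s internal negative codes into real errnos, in this era -EAGAIN (the preallocated pool ran dry, e.g. under a race) and -ENOSPC (the id space is exhausted). The retry idiom that implies for callers, sketched with the hypothetical names from above:

        /* Hedged sketch of the caller-side retry loop; my_idr and obj are
         * illustrative, and the errno mapping follows the wrappers above. */
        static int alloc_id_retry(void *obj)
        {
                int id, err;
        again:
                if (!idr_pre_get(&my_idr, GFP_KERNEL))
                        return -ENOMEM;
                err = idr_get_new(&my_idr, obj, &id);
                if (err == -EAGAIN)
                        goto again;     /* pool emptied under us; refill, retry */
                if (err)
                        return err;     /* id space exhausted (-ENOSPC) */
                return id;
        }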
@@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id)
                         free_layer(idp, **paa);
                         **paa-- = NULL;
                 }
-                if ( ! *paa )
+                if (!*paa)
                         idp->layers = 0;
-        } else {
+        } else
                 idr_remove_warning(id);
-        }
 }
 
 /**
@@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id)
         id &= MAX_ID_MASK;
 
         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
-        if ( idp->top && idp->top->count == 1 &&
-            (idp->layers > 1) &&
-            idp->top->ary[0]){ // We can drop a layer
+        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
+            idp->top->ary[0]) { // We can drop a layer
 
                 p = idp->top->ary[0];
                 idp->top->bitmap = idp->top->count = 0;
@@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id)
                 --idp->layers;
         }
         while (idp->id_free_cnt >= IDR_FREE_MAX) {
-
                 p = alloc_layer(idp);
                 kmem_cache_free(idr_layer_cache, p);
                 return;
@@ -346,6 +345,19 @@ void idr_remove(struct idr *idp, int id)
 EXPORT_SYMBOL(idr_remove);
 
 /**
+ * idr_destroy - release all cached layers within an idr tree
+ * idp: idr handle
+ */
+void idr_destroy(struct idr *idp)
+{
+        while (idp->id_free_cnt) {
+                struct idr_layer *p = alloc_layer(idp);
+                kmem_cache_free(idr_layer_cache, p);
+        }
+}
+EXPORT_SYMBOL(idr_destroy);
+
+/**
  * idr_find - return pointer for given id
  * @idp: idr handle
  * @id: lookup key
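The newly added idr_destroy() only drains the idr's private cache of spare layers, the pool that idr_pre_get() and idr_remove() feed; it does not free layers still holding live ids, so a user would remove those first. A hypothetical teardown path:

        /* Hypothetical cleanup; my_id is illustrative, and every allocated
         * id is assumed to be dropped before idr_destroy() runs. */
        static void my_teardown(void)
        {
                idr_remove(&my_idr, my_id);     /* drop each remaining id */
                idr_destroy(&my_idr);           /* return cached layers to slab */
        }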
@@ -378,8 +390,8 @@ void *idr_find(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_find);
 
-static void idr_cache_ctor(void * idr_layer,
-                        kmem_cache_t *idr_layer_cache, unsigned long flags)
+static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+                unsigned long flags)
 {
         memset(idr_layer, 0, sizeof(struct idr_layer));
 }
@@ -387,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer,
 static int init_id_cache(void)
 {
         if (!idr_layer_cache)
                 idr_layer_cache = kmem_cache_create("idr_layer_cache",
                         sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
         return 0;
 }
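One slab detail worth keeping in mind when reading init_id_cache(): with the 2.6-era six-argument kmem_cache_create(), the constructor runs when the cache populates a fresh slab page, not on every kmem_cache_alloc(), so objects arrive zeroed only the first time around. A minimal sketch of the same pattern with hypothetical names (kmem_cache_t is this era's typedef; later kernels changed both the typedef and the signature):

        #include <linux/init.h>
        #include <linux/slab.h>

        struct my_obj { int dummy; };   /* hypothetical object type */
        static kmem_cache_t *my_cache;

        /* Runs once per object when a new slab page is set up. */
        static void my_ctor(void *obj, kmem_cache_t *cache, unsigned long flags)
        {
                memset(obj, 0, sizeof(struct my_obj));
        }

        static int __init my_cache_init(void)
        {
                my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
                                             0, 0, my_ctor, NULL);
                return my_cache ? 0 : -ENOMEM;
        }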