| author | Anton Altaparmakov <aia21@cantab.net> | 2005-10-31 05:06:46 -0500 |
|---|---|---|
| committer | Anton Altaparmakov <aia21@cantab.net> | 2005-10-31 05:06:46 -0500 |
| commit | 1f04c0a24b2f3cfe89c802a24396263623e3512d (patch) | |
| tree | d7e2216b6e65b833c0c2b79b478d13ce17dbf296 /lib/idr.c | |
| parent | 07b188ab773e183871e57b33ae37bf635c9f12ba (diff) | |
| parent | e2f2e58e7968f8446b1078a20a18bf8ea12b4fbc (diff) | |
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'lib/idr.c')
| -rw-r--r-- | lib/idr.c | 35 |
1 file changed, 17 insertions(+), 18 deletions(-)
```diff
@@ -6,20 +6,20 @@
  * Modified by George Anzinger to reuse immediately and to use
  * find bit instructions. Also removed _irq on spinlocks.
  *
  * Small id to pointer translation service.
  *
  * It uses a radix tree like structure as a sparse array indexed
  * by the id to obtain the pointer. The bitmap makes allocating
  * a new id quick.
  *
  * You call it to allocate an id (an int) an associate with that id a
  * pointer or what ever, we treat it as a (void *). You can pass this
  * id to a user for him to pass back at a later time. You then pass
  * that id to this code and it returns your pointer.

  * You can release ids at any time. When all ids are released, most of
  * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
  * don't need to go to the memory "store" during an id allocate, just
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
  */
```
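The hunk above appears to change only whitespace, but the comment it runs through is the best summary of the idr API: allocate an int id, associate a `void *` with it, hand the id out, and exchange it for the pointer later. For context, a minimal sketch of that calling pattern against the idr interface of this era follows; `my_objects`, `my_obj_register()`, and the other `my_*` names are hypothetical, not part of this patch:

```c
#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct idr my_objects;           /* hypothetical idr instance */

static void my_obj_init(void)
{
        idr_init(&my_objects);
}

/* Allocate an id and associate it with @ptr; returns the id or -errno. */
static int my_obj_register(void *ptr)
{
        int id, err;

        do {
                /* Refill the idr's private layer pool first; may sleep. */
                if (!idr_pre_get(&my_objects, GFP_KERNEL))
                        return -ENOMEM;
                err = idr_get_new(&my_objects, ptr, &id);
        } while (err == -EAGAIN);

        return err ? err : id;
}

/* The user hands the id back later and we recover the pointer. */
static void *my_obj_lookup(int id)
{
        return idr_find(&my_objects, id);
}

static void my_obj_unregister(int id)
{
        idr_remove(&my_objects, id);
}
```

The `idr_pre_get()`/`idr_get_new()` split exists so the allocation that may sleep can happen before the caller takes its own locks; `-EAGAIN` from `idr_get_new()` means the preallocated layers were consumed and the caller should preload and retry.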
```diff
@@ -77,7 +77,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
         while (idp->id_free_cnt < IDR_FREE_MAX) {
                 struct idr_layer *new;
                 new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
-                if(new == NULL)
+                if (new == NULL)
                         return (0);
                 free_layer(idp, new);
         }
```
```diff
@@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
                 if (m == IDR_SIZE) {
                         /* no space available go back to previous layer. */
                         l++;
-                        id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
+                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
                         if (!(p = pa[l])) {
                                 *starting_id = id;
                                 return -2;
```
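The re-spaced expression is the subtle one in this hunk: when every slot at the current level is taken (`m == IDR_SIZE`), `sub_alloc()` climbs a layer and rounds `id` up to the first id served by the next subtree at level `l`. OR-ing in `(1 << (IDR_BITS * l)) - 1` sets every bit below that subtree boundary, and the `+ 1` carries into the next multiple. A standalone arithmetic sketch (assuming `IDR_BITS == 5`, its value on 32-bit builds):

```c
#include <stdio.h>

#define IDR_BITS 5      /* 5 on 32-bit kernels, 6 on 64-bit */

int main(void)
{
        int l = 1;      /* level we just climbed back to */
        int id = 37;    /* candidate id whose subtree is full */

        /* 37 | 31 == 63; 63 + 1 == 64, the first id of the next
         * level-1 subtree (each level-1 subtree spans 32 ids here). */
        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
        printf("next candidate id: %d\n", id);  /* prints 64 */
        return 0;
}
```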
```diff
@@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
         struct idr_layer *p, *new;
         int layers, v, id;

         id = starting_id;
 build_up:
         p = idp->top;
```
```diff
@@ -225,6 +225,7 @@ build_up:
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
         int rv;
+
         rv = idr_get_new_above_int(idp, ptr, starting_id);
         /*
          * This is a cheap hack until the IDR code can be fixed to
```
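The "cheap hack" named in the context lines is the remapping of `idr_get_new_above_int()`'s private return codes (the `-2` visible in the `sub_alloc()` hunk above is one of them) onto real errno values before they reach the caller. My reading of the surrounding code, not shown in this hunk, is that callers end up seeing `-EAGAIN` when the preallocated free list is empty and `-ENOSPC` when the id space is exhausted. A hypothetical call site:

```c
#include <linux/idr.h>
#include <linux/errno.h>

/* Hypothetical helper: allocate an id of at least 100 for @ptr. */
static int my_obj_register_above(struct idr *idp, void *ptr)
{
        int err, id;

        err = idr_get_new_above(idp, ptr, 100, &id);
        if (err == -EAGAIN)
                return err;     /* free list empty: idr_pre_get() and retry */
        if (err)
                return err;     /* -ENOSPC: no free id at or above 100 */
        return id;              /* success: id now maps to ptr */
}
```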
```diff
@@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above);
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
         int rv;
+
         rv = idr_get_new_above_int(idp, ptr, 0);
         /*
          * This is a cheap hack until the IDR code can be fixed to
```
```diff
@@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id)
                         free_layer(idp, **paa);
                         **paa-- = NULL;
                 }
-                if ( ! *paa )
+                if (!*paa)
                         idp->layers = 0;
-        } else {
+        } else
                 idr_remove_warning(id);
-        }
 }

 /**
```
```diff
@@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id)
         id &= MAX_ID_MASK;

         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
-        if ( idp->top && idp->top->count == 1 &&
-             (idp->layers > 1) &&
-             idp->top->ary[0]){ // We can drop a layer
+        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
+            idp->top->ary[0]) { // We can drop a layer

                 p = idp->top->ary[0];
                 idp->top->bitmap = idp->top->count = 0;
```
```diff
@@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id)
                 --idp->layers;
         }
         while (idp->id_free_cnt >= IDR_FREE_MAX) {
-
                 p = alloc_layer(idp);
                 kmem_cache_free(idr_layer_cache, p);
                 return;
```
```diff
@@ -391,8 +390,8 @@ void *idr_find(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_find);

-static void idr_cache_ctor(void * idr_layer,
-                        kmem_cache_t *idr_layer_cache, unsigned long flags)
+static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+                        unsigned long flags)
 {
         memset(idr_layer, 0, sizeof(struct idr_layer));
 }
```
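The signature rewrap above is cosmetic, but the function documents the slab-constructor contract this file relies on: in this era `kmem_cache_create()` took a constructor of type `void (*)(void *, kmem_cache_t *, unsigned long)`, run once per object when a slab page is populated, so a fresh `idr_layer` arrives zeroed (and the idr code clears fields such as `bitmap` and `count` before freeing a layer back, as the `idr_remove()` hunk shows). A hedged sketch of the same pattern with a hypothetical cache:

```c
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct example {                        /* hypothetical cached object */
        int a;
        void *b;
};

static kmem_cache_t *example_cache;

/* Constructor: runs when the slab allocator populates a new page,
 * not on every kmem_cache_alloc(), so freed objects must be returned
 * to the cache already re-zeroed. */
static void example_ctor(void *obj, kmem_cache_t *cache, unsigned long flags)
{
        memset(obj, 0, sizeof(struct example));
}

static int example_cache_init(void)
{
        example_cache = kmem_cache_create("example_cache",
                        sizeof(struct example), 0, 0, example_ctor, NULL);
        return example_cache ? 0 : -ENOMEM;
}
```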
```diff
@@ -400,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer,
 static int init_id_cache(void)
 {
         if (!idr_layer_cache)
                 idr_layer_cache = kmem_cache_create("idr_layer_cache",
                         sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
         return 0;
 }
```
