author | Fengguang Wu <fengguang.wu@intel.com> | 2012-10-04 20:13:15 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-05 14:04:56 -0400
commit | 125c4c706b680c7831f0966ff873c1ad0354ec25 (patch) |
tree | 2b45c5fdd3f69173774fc9ae92c3568cb9796d37 /lib |
parent | b74ca3b3fd0fac08167ff96287cece56e8ca6f4d (diff) |
idr: rename MAX_LEVEL to MAX_IDR_LEVEL
To avoid name conflicts:
drivers/video/riva/fbdev.c:281:9: sparse: preprocessor token MAX_LEVEL redefined
While at it, also make the other names more consistent and add
parentheses.
[akpm@linux-foundation.org: repair fallout]
[sfr@canb.auug.org.au: IB/mlx4: fix for MAX_ID_MASK to MAX_IDR_MASK name change]
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Cc: Bernd Petrovitsch <bernd@petrovitsch.priv.at>
Cc: walter harms <wharms@bfs.de>
Cc: Glauber Costa <glommer@parallels.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Roland Dreier <roland@purestorage.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/idr.c | 32
1 file changed, 16 insertions, 16 deletions
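All of the hunks below follow from the same set of renames. As a reading aid (not part of this commit's lib/ diff), the renamed macros map roughly as in the sketch below, based on the include/linux/idr.h definitions of this period; treat the exact expressions as an approximation rather than a verbatim copy of the header:

```c
/* Hedged sketch of the renamed macros; IDR_BITS is defined elsewhere in idr.h. */
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)                         /* was MAX_ID_SHIFT */
#define MAX_IDR_BIT   (1U << MAX_IDR_SHIFT)                         /* was MAX_ID_BIT   */
#define MAX_IDR_MASK  (MAX_IDR_BIT - 1)                             /* was MAX_ID_MASK  */
#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)   /* was MAX_LEVEL    */
#define MAX_IDR_FREE  (MAX_IDR_LEVEL * 2)                           /* was IDR_FREE_MAX */
```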
@@ -20,7 +20,7 @@
  * that id to this code and it returns your pointer.

  * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
  * don't need to go to the memory "store" during an id allocate, just
  * so you don't need to be too concerned about locking and conflicts
  * with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
-        while (idp->id_free_cnt < IDR_FREE_MAX) {
+        while (idp->id_free_cnt < MAX_IDR_FREE) {
                 struct idr_layer *new;
                 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
                 if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                         sh = IDR_BITS*l;
                         id = ((id >> sh) ^ n ^ m) << sh;
                 }
-                if ((id >= MAX_ID_BIT) || (id < 0))
+                if ((id >= MAX_IDR_BIT) || (id < 0))
                         return IDR_NOMORE_SPACE;
                 if (l == 0)
                         break;
@@ -223,7 +223,7 @@ build_up:
          * Add a new layer to the top of the tree if the requested
          * id is larger than the currently allocated space.
          */
-        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+        while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                 layers++;
                 if (!p->count) {
                         /* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:

 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
-        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer *pa[MAX_IDR_LEVEL];
         int id;

         id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
 static void sub_remove(struct idr *idp, int shift, int id)
 {
         struct idr_layer *p = idp->top;
-        struct idr_layer **pa[MAX_LEVEL];
+        struct idr_layer **pa[MAX_IDR_LEVEL];
         struct idr_layer ***paa = &pa[0];
         struct idr_layer *to_free;
         int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
         struct idr_layer *to_free;

         /* Mask off upper bits we don't use for the search. */
-        id &= MAX_ID_MASK;
+        id &= MAX_IDR_MASK;

         sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
         if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
                 to_free->bitmap = to_free->count = 0;
                 free_layer(to_free);
         }
-        while (idp->id_free_cnt >= IDR_FREE_MAX) {
+        while (idp->id_free_cnt >= MAX_IDR_FREE) {
                 p = get_from_free_list(idp);
                 /*
                  * Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
         int n, id, max;
         int bt_mask;
         struct idr_layer *p;
-        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer *pa[MAX_IDR_LEVEL];
         struct idr_layer **paa = &pa[0];

         n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
         n = (p->layer+1) * IDR_BITS;

         /* Mask off upper bits we don't use for the search. */
-        id &= MAX_ID_MASK;
+        id &= MAX_IDR_MASK;

         if (id >= (1 << n))
                 return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
 {
         int n, id, max, error = 0;
         struct idr_layer *p;
-        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer *pa[MAX_IDR_LEVEL];
         struct idr_layer **paa = &pa[0];

         n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
  */
 void *idr_get_next(struct idr *idp, int *nextidp)
 {
-        struct idr_layer *p, *pa[MAX_LEVEL];
+        struct idr_layer *p, *pa[MAX_IDR_LEVEL];
         struct idr_layer **paa = &pa[0];
         int id = *nextidp;
         int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)

         n = (p->layer+1) * IDR_BITS;

-        id &= MAX_ID_MASK;
+        id &= MAX_IDR_MASK;

         if (id >= (1 << n))
                 return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
-        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer *pa[MAX_IDR_LEVEL];
         struct ida_bitmap *bitmap;
         unsigned long flags;
         int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
         if (t < 0)
                 return _idr_rc_to_errno(t);

-        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
                 return -ENOSPC;

         if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
         }

         id = idr_id * IDA_BITMAP_BITS + t;
-        if (id >= MAX_ID_BIT)
+        if (id >= MAX_IDR_BIT)
                 return -ENOSPC;

         __set_bit(t, bitmap->bitmap);
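For context on the functions touched above, here is a minimal sketch of how a caller of this era's idr API typically drove idr_pre_get()/idr_get_new_above()/idr_remove(). The idr instance, helper names, starting id of 1, and error handling are illustrative assumptions, not taken from this commit:

```c
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);		/* hypothetical idr instance */

/* Allocate an id >= 1 for @obj, retrying while the free list needs refilling. */
static int example_register(void *obj)
{
	int id, ret;

	do {
		/* Refill the per-idr free list (kept up to MAX_IDR_FREE layers); may sleep. */
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		ret = idr_get_new_above(&example_idr, obj, 1, &id);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}

static void example_unregister(int id)
{
	/* idr_remove() masks the id with MAX_IDR_MASK before walking the tree. */
	idr_remove(&example_idr, id);
}
```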