diff options
Diffstat (limited to 'lib')
-rw-r--r-- | lib/idr.c | 38 |
1 file changed, 23 insertions, 15 deletions
@@ -43,6 +43,14 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); | |||
43 | static DEFINE_PER_CPU(int, idr_preload_cnt); | 43 | static DEFINE_PER_CPU(int, idr_preload_cnt); |
44 | static DEFINE_SPINLOCK(simple_ida_lock); | 44 | static DEFINE_SPINLOCK(simple_ida_lock); |
45 | 45 | ||
46 | /* the maximum ID which can be allocated given idr->layers */ | ||
47 | static int idr_max(int layers) | ||
48 | { | ||
49 | int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); | ||
50 | |||
51 | return (1 << bits) - 1; | ||
52 | } | ||
53 | |||
46 | static struct idr_layer *get_from_free_list(struct idr *idp) | 54 | static struct idr_layer *get_from_free_list(struct idr *idp) |
47 | { | 55 | { |
48 | struct idr_layer *p; | 56 | struct idr_layer *p; |
@@ -290,7 +298,7 @@ build_up: | |||
290 | * Add a new layer to the top of the tree if the requested | 298 | * Add a new layer to the top of the tree if the requested |
291 | * id is larger than the currently allocated space. | 299 | * id is larger than the currently allocated space. |
292 | */ | 300 | */ |
293 | while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { | 301 | while (id > idr_max(layers)) { |
294 | layers++; | 302 | layers++; |
295 | if (!p->count) { | 303 | if (!p->count) { |
296 | /* special case: if the tree is currently empty, | 304 | /* special case: if the tree is currently empty, |
@@ -361,7 +369,7 @@ static void idr_fill_slot(void *ptr, int id, struct idr_layer **pa) | |||
361 | */ | 369 | */ |
362 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | 370 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
363 | { | 371 | { |
364 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 372 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
365 | int rv; | 373 | int rv; |
366 | 374 | ||
367 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); | 375 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
@@ -457,7 +465,7 @@ EXPORT_SYMBOL(idr_preload); | |||
457 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) | 465 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) |
458 | { | 466 | { |
459 | int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ | 467 | int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ |
460 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 468 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
461 | int id; | 469 | int id; |
462 | 470 | ||
463 | might_sleep_if(gfp_mask & __GFP_WAIT); | 471 | might_sleep_if(gfp_mask & __GFP_WAIT); |
@@ -490,7 +498,7 @@ static void idr_remove_warning(int id) | |||
490 | static void sub_remove(struct idr *idp, int shift, int id) | 498 | static void sub_remove(struct idr *idp, int shift, int id) |
491 | { | 499 | { |
492 | struct idr_layer *p = idp->top; | 500 | struct idr_layer *p = idp->top; |
493 | struct idr_layer **pa[MAX_IDR_LEVEL]; | 501 | struct idr_layer **pa[MAX_IDR_LEVEL + 1]; |
494 | struct idr_layer ***paa = &pa[0]; | 502 | struct idr_layer ***paa = &pa[0]; |
495 | struct idr_layer *to_free; | 503 | struct idr_layer *to_free; |
496 | int n; | 504 | int n; |
@@ -571,16 +579,16 @@ void __idr_remove_all(struct idr *idp) | |||
571 | int n, id, max; | 579 | int n, id, max; |
572 | int bt_mask; | 580 | int bt_mask; |
573 | struct idr_layer *p; | 581 | struct idr_layer *p; |
574 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 582 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
575 | struct idr_layer **paa = &pa[0]; | 583 | struct idr_layer **paa = &pa[0]; |
576 | 584 | ||
577 | n = idp->layers * IDR_BITS; | 585 | n = idp->layers * IDR_BITS; |
578 | p = idp->top; | 586 | p = idp->top; |
579 | rcu_assign_pointer(idp->top, NULL); | 587 | rcu_assign_pointer(idp->top, NULL); |
580 | max = 1 << n; | 588 | max = idr_max(idp->layers); |
581 | 589 | ||
582 | id = 0; | 590 | id = 0; |
583 | while (id < max) { | 591 | while (id >= 0 && id <= max) { |
584 | while (n > IDR_BITS && p) { | 592 | while (n > IDR_BITS && p) { |
585 | n -= IDR_BITS; | 593 | n -= IDR_BITS; |
586 | *paa++ = p; | 594 | *paa++ = p; |
@@ -650,7 +658,7 @@ void *idr_find(struct idr *idp, int id) | |||
650 | /* Mask off upper bits we don't use for the search. */ | 658 | /* Mask off upper bits we don't use for the search. */ |
651 | id &= MAX_IDR_MASK; | 659 | id &= MAX_IDR_MASK; |
652 | 660 | ||
653 | if (id >= (1 << n)) | 661 | if (id > idr_max(p->layer + 1)) |
654 | return NULL; | 662 | return NULL; |
655 | BUG_ON(n == 0); | 663 | BUG_ON(n == 0); |
656 | 664 | ||
@@ -686,15 +694,15 @@ int idr_for_each(struct idr *idp, | |||
686 | { | 694 | { |
687 | int n, id, max, error = 0; | 695 | int n, id, max, error = 0; |
688 | struct idr_layer *p; | 696 | struct idr_layer *p; |
689 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 697 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
690 | struct idr_layer **paa = &pa[0]; | 698 | struct idr_layer **paa = &pa[0]; |
691 | 699 | ||
692 | n = idp->layers * IDR_BITS; | 700 | n = idp->layers * IDR_BITS; |
693 | p = rcu_dereference_raw(idp->top); | 701 | p = rcu_dereference_raw(idp->top); |
694 | max = 1 << n; | 702 | max = idr_max(idp->layers); |
695 | 703 | ||
696 | id = 0; | 704 | id = 0; |
697 | while (id < max) { | 705 | while (id >= 0 && id <= max) { |
698 | while (n > 0 && p) { | 706 | while (n > 0 && p) { |
699 | n -= IDR_BITS; | 707 | n -= IDR_BITS; |
700 | *paa++ = p; | 708 | *paa++ = p; |
@@ -732,7 +740,7 @@ EXPORT_SYMBOL(idr_for_each); | |||
732 | */ | 740 | */ |
733 | void *idr_get_next(struct idr *idp, int *nextidp) | 741 | void *idr_get_next(struct idr *idp, int *nextidp) |
734 | { | 742 | { |
735 | struct idr_layer *p, *pa[MAX_IDR_LEVEL]; | 743 | struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; |
736 | struct idr_layer **paa = &pa[0]; | 744 | struct idr_layer **paa = &pa[0]; |
737 | int id = *nextidp; | 745 | int id = *nextidp; |
738 | int n, max; | 746 | int n, max; |
@@ -742,9 +750,9 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
742 | if (!p) | 750 | if (!p) |
743 | return NULL; | 751 | return NULL; |
744 | n = (p->layer + 1) * IDR_BITS; | 752 | n = (p->layer + 1) * IDR_BITS; |
745 | max = 1 << n; | 753 | max = idr_max(p->layer + 1); |
746 | 754 | ||
747 | while (id < max) { | 755 | while (id >= 0 && id <= max) { |
748 | while (n > 0 && p) { | 756 | while (n > 0 && p) { |
749 | n -= IDR_BITS; | 757 | n -= IDR_BITS; |
750 | *paa++ = p; | 758 | *paa++ = p; |
@@ -918,7 +926,7 @@ EXPORT_SYMBOL(ida_pre_get); | |||
918 | */ | 926 | */ |
919 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 927 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
920 | { | 928 | { |
921 | struct idr_layer *pa[MAX_IDR_LEVEL]; | 929 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
922 | struct ida_bitmap *bitmap; | 930 | struct ida_bitmap *bitmap; |
923 | unsigned long flags; | 931 | unsigned long flags; |
924 | int idr_id = starting_id / IDA_BITMAP_BITS; | 932 | int idr_id = starting_id / IDA_BITMAP_BITS; |