Diffstat (limited to 'lib/idr.c')

 -rw-r--r--   lib/idr.c | 101
 1 file changed, 55 insertions(+), 46 deletions(-)
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 }

 /**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
  * @idp: idr handle
  * @gfp_mask: memory allocation flags
  *
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible. This of course requires that
+ * no spinning locks be held.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
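The comment above prescribes the old two-step protocol: preallocate with idr_pre_get() while no spinlock is held, then allocate under the caller's lock and retry on failure. A minimal caller sketch of that protocol; assign_id, my_idr, my_lock and obj are hypothetical names:

    int assign_id(struct idr *my_idr, spinlock_t *my_lock, void *obj, int *id)
    {
            int ret;
    again:
            /* GFP_KERNEL may sleep, so fill the freelist before locking. */
            if (!idr_pre_get(my_idr, GFP_KERNEL))
                    return -ENOMEM;         /* REALLY out of memory */

            spin_lock(my_lock);
            ret = idr_get_new(my_idr, obj, id);
            spin_unlock(my_lock);

            if (ret == -EAGAIN)
                    goto again;             /* preallocation consumed; refill */
            return ret;                     /* 0 on success, -ENOSPC when full */
    }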
@@ -156,10 +157,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                         id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                         /* if already at the top layer, we need to grow */
-                        if (!(p = pa[l])) {
+                        if (id >= 1 << (idp->layers * IDR_BITS)) {
                                 *starting_id = id;
                                 return IDR_NEED_TO_GROW;
                         }
+                        p = pa[l];
+                        BUG_ON(!p);

                         /* If we need to go up one layer, continue the
                          * loop; otherwise, restart from the top.
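To put numbers on the new bound check (IDR_BITS is 5 on 32-bit builds, 6 on 64-bit): with idp->layers == 2 the tree covers ids 0 .. (1 << 10) - 1 == 1023, so a round-up to 1024 must grow the tree. The old NULL test on pa[l] could miss that overflow and keep searching inside the existing tree, handing out an id that was already in use; comparing id directly against 1 << (idp->layers * IDR_BITS) makes the grow condition explicit, and the new BUG_ON asserts that pa[l] is always valid once id is in range.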
@@ -282,17 +285,19 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  * idr_get_new_above - allocate new idr entry above or equal to a start id
  * @idp: idr handle
  * @ptr: pointer you want associated with the id
- * @start_id: id to start search at
+ * @starting_id: id to start search at
  * @id: pointer to the allocated handle
  *
  * This is the allocate id function. It should be called with any
  * required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
+ *
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * @id returns a value in the range @starting_id ... %0x7fffffff
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
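@starting_id is commonly used to keep low ids out of circulation, for example reserving 0 as a "no id" sentinel. A compact sketch reusing the retry protocol from above, with hypothetical my_idr and obj and locking elided:

    int ret, new_id;

    do {
            if (!idr_pre_get(my_idr, GFP_KERNEL))
                    return -ENOMEM;
            ret = idr_get_new_above(my_idr, obj, 1, &new_id);
    } while (ret == -EAGAIN);           /* refill the freelist and retry */
    /* on success new_id >= 1; -ENOSPC means the id space is exhausted */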
@@ -316,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above);
  * @ptr: pointer you want associated with the id
  * @id: pointer to the allocated handle
  *
- * This is the allocate id function. It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
  */
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
@@ -386,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 }

 /**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
  * @idp: idr handle
  * @id: unique key
  */
@@ -435,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
  * function will remove all id mappings and leave all idp_layers
  * unused.
  *
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
  * use idr_for_each() to free all objects, if necessary, then
  * idr_remove_all() to remove all ids, and idr_destroy() to free
  * up the cached idr_layers.
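The sequence just described, written out; my_teardown and free_obj are hypothetical names, and the sketch assumes every stored pointer was kmalloc'ed:

    static int free_obj(int id, void *p, void *data)
    {
            kfree(p);
            return 0;                           /* 0 keeps the walk going */
    }

    static void my_teardown(struct idr *idp)
    {
            idr_for_each(idp, free_obj, NULL);  /* free the objects */
            idr_remove_all(idp);                /* drop all id mappings */
            idr_destroy(idp);                   /* free cached idr_layers */
    }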
@@ -443,6 +447,7 @@ EXPORT_SYMBOL(idr_remove);
 void idr_remove_all(struct idr *idp)
 {
         int n, id, max;
+        int bt_mask;
         struct idr_layer *p;
         struct idr_layer *pa[MAX_LEVEL];
         struct idr_layer **paa = &pa[0];
@@ -460,8 +465,10 @@ void idr_remove_all(struct idr *idp)
                         p = p->ary[(id >> n) & IDR_MASK];
                 }

+                bt_mask = id;
                 id += 1 << n;
-                while (n < fls(id)) {
+                /* Get the highest bit that the above add changed from 0->1. */
+                while (n < fls(id ^ bt_mask)) {
                         if (p)
                                 free_layer(p);
                         n += IDR_BITS;
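A worked case shows why the XOR matters (again assuming IDR_BITS == 5, so one leaf spans 32 ids, in a three-layer tree): after the leaf covering ids 1024..1055 is finished, bt_mask == 1024 and the add yields id == 1056. The old test fls(1056) == 11 walked up past n == 10 and freed the still-populated middle layer covering ids 1024..2047; the new test fls(1056 ^ 1024) == fls(32) == 6 frees only the exhausted leaf. A layer is finished exactly when the increment carries out of its bit range, i.e. when one of its bits flips from 0 to 1.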
@@ -474,7 +481,7 @@ EXPORT_SYMBOL(idr_remove_all);

 /**
  * idr_destroy - release all cached layers within an idr tree
- * idp: idr handle
+ * @idp: idr handle
  */
 void idr_destroy(struct idr *idp)
 {
@@ -502,7 +509,7 @@ void *idr_find(struct idr *idp, int id)
         int n;
         struct idr_layer *p;

-        p = rcu_dereference(idp->top);
+        p = rcu_dereference_raw(idp->top);
         if (!p)
                 return NULL;
         n = (p->layer+1) * IDR_BITS;
@@ -517,7 +524,7 @@ void *idr_find(struct idr *idp, int id)
         while (n > 0 && p) {
                 n -= IDR_BITS;
                 BUG_ON(n != p->layer*IDR_BITS);
-                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
         }
         return((void *)p);
 }
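The switch to rcu_dereference_raw() reflects that idr supports too many locking conventions for a single lockdep assertion: the lookup may run under rcu_read_lock() or with updates excluded by the caller's own lock. A sketch of the RCU-reader side; my_idr and use_object are hypothetical names:

    rcu_read_lock();
    obj = idr_find(my_idr, id);
    if (obj)
            use_object(obj);    /* obj's lifetime must follow RCU rules */
    rcu_read_unlock();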
@@ -537,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
  * not allowed.
  *
  * We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
  *
  * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
  */
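Because any non-zero return from @fn stops the walk and is passed through, a callback can double as a search predicate. A sketch with hypothetical names find_match, my_idr and needle:

    static int find_match(int id, void *p, void *data)
    {
            return p == data;   /* non-zero return stops the iteration */
    }

    /* yields 1 if needle is stored anywhere in the tree, else 0 */
    present = idr_for_each(my_idr, find_match, needle);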
@@ -550,7 +557,7 @@ int idr_for_each(struct idr *idp,
         struct idr_layer **paa = &pa[0];

         n = idp->layers * IDR_BITS;
-        p = rcu_dereference(idp->top);
+        p = rcu_dereference_raw(idp->top);
         max = 1 << n;

         id = 0;
@@ -558,7 +565,7 @@ int idr_for_each(struct idr *idp,
         while (n > 0 && p) {
                 n -= IDR_BITS;
                 *paa++ = p;
-                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
         }

         if (p) {
@@ -581,10 +588,11 @@ EXPORT_SYMBOL(idr_for_each);
 /**
  * idr_get_next - lookup next object of id to given id.
  * @idp: idr handle
- * @id: pointer to lookup key
+ * @nextidp: pointer to lookup key
  *
  * Returns pointer to registered object with id, which is next number to
- * given id.
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
  */

 void *idr_get_next(struct idr *idp, int *nextidp)
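Updating *@nextidp is what makes open-coded iteration work: each call resumes just past the id it last returned. A sketch, with my_idr and process() as hypothetical names:

    void *p;
    int id = 0;

    while ((p = idr_get_next(my_idr, &id)) != NULL) {
            process(id, p);
            id++;               /* resume after the id just returned */
    }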
@@ -597,7 +605,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
         /* find first ent */
         n = idp->layers * IDR_BITS;
         max = 1 << n;
-        p = rcu_dereference(idp->top);
+        p = rcu_dereference_raw(idp->top);
         if (!p)
                 return NULL;

@@ -605,7 +613,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
         while (n > 0 && p) {
                 n -= IDR_BITS;
                 *paa++ = p;
-                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
         }

         if (p) {
@@ -621,7 +629,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
         }
         return NULL;
 }
-
+EXPORT_SYMBOL(idr_get_next);


 /**
@@ -631,8 +639,8 @@ void *idr_get_next(struct idr *idp, int *nextidp)
  * @id: lookup key
  *
  * Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
  *
  * The caller must serialize with writers.
  */
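Since failures come back as error pointers rather than NULL, callers test the result with IS_ERR(). A sketch assuming hypothetical my_idr and new_obj, and assuming the stored objects are kmalloc'ed:

    void *old;

    old = idr_replace(my_idr, new_obj, id);
    if (IS_ERR(old))
            return PTR_ERR(old);        /* -ENOENT or -EINVAL */
    kfree(old);                         /* we now own the displaced pointer */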
@@ -690,10 +698,11 @@ void idr_init(struct idr *idp)
 EXPORT_SYMBOL(idr_init);


-/*
+/**
+ * DOC: IDA description
  * IDA - IDR based ID allocator
  *
- * this is id allocator without id -> pointer translation. Memory
+ * This is id allocator without id -> pointer translation. Memory
  * usage is much lower than full blown idr because each id only
  * occupies a bit. ida uses a custom leaf node which contains
  * IDA_BITMAP_BITS slots.
@@ -726,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
  * following function. It preallocates enough memory to satisfy the
  * worst possible allocation.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
@@ -753,17 +762,17 @@ EXPORT_SYMBOL(ida_pre_get);
 /**
  * ida_get_new_above - allocate new ID above or equal to a start id
  * @ida: ida handle
- * @staring_id: id to start search at
+ * @starting_id: id to start search at
  * @p_id: pointer to the allocated handle
  *
  * Allocate new ID above or equal to @starting_id. It should be called with
  * any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the ida_pre_get() call. If the ida is full, it will
- * return -ENOSPC.
+ * If memory is required, it will return %-EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call. If the ida is full, it will
+ * return %-ENOSPC.
  *
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
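The ida side mirrors the idr protocol, minus the pointer payload. A sketch allocating an id >= 1 from a hypothetical my_ida:

    int id, ret;

    do {
            if (!ida_pre_get(my_ida, GFP_KERNEL))
                    return -ENOMEM;
            ret = ida_get_new_above(my_ida, 1, &id);
    } while (ret == -EAGAIN);

    if (ret)
            return ret;         /* -ENOSPC: id space exhausted */
    /* use id; release it later with ida_remove(my_ida, id) */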
@@ -845,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
  *
  * Allocate new ID. It should be called with any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If memory is required, it will return %-EAGAIN, you should unlock
+ * and go back to the idr_pre_get() call. If the idr is full, it will
+ * return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @id returns a value in the range %0 ... %0x7fffffff.
  */
 int ida_get_new(struct ida *ida, int *p_id)
 {
@@ -907,7 +916,7 @@ EXPORT_SYMBOL(ida_remove);

 /**
  * ida_destroy - release all cached layers within an ida tree
- * ida: ida handle
+ * @ida: ida handle
  */
 void ida_destroy(struct ida *ida)
 {