diff options
Diffstat (limited to 'lib/idr.c')
-rw-r--r-- | lib/idr.c | 78 |
1 files changed, 41 insertions, 37 deletions
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
106 | } | 106 | } |
107 | 107 | ||
108 | /** | 108 | /** |
109 | * idr_pre_get - reserver resources for idr allocation | 109 | * idr_pre_get - reserve resources for idr allocation |
110 | * @idp: idr handle | 110 | * @idp: idr handle |
111 | * @gfp_mask: memory allocation flags | 111 | * @gfp_mask: memory allocation flags |
112 | * | 112 | * |
113 | * This function should be called prior to locking and calling the | 113 | * This function should be called prior to calling the idr_get_new* functions. |
114 | * idr_get_new* functions. It preallocates enough memory to satisfy | 114 | * It preallocates enough memory to satisfy the worst possible allocation. The |
115 | * the worst possible allocation. | 115 | * caller should pass in GFP_KERNEL if possible. This of course requires that |
116 | * no spinning locks be held. | ||
116 | * | 117 | * |
117 | * If the system is REALLY out of memory this function returns 0, | 118 | * If the system is REALLY out of memory this function returns %0, |
118 | * otherwise 1. | 119 | * otherwise %1. |
119 | */ | 120 | */ |
120 | int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | 121 | int idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
121 | { | 122 | { |
@@ -284,17 +285,19 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
284 | * idr_get_new_above - allocate new idr entry above or equal to a start id | 285 | * idr_get_new_above - allocate new idr entry above or equal to a start id |
285 | * @idp: idr handle | 286 | * @idp: idr handle |
286 | * @ptr: pointer you want associated with the id | 287 | * @ptr: pointer you want associated with the id |
287 | * @start_id: id to start search at | 288 | * @starting_id: id to start search at |
288 | * @id: pointer to the allocated handle | 289 | * @id: pointer to the allocated handle |
289 | * | 290 | * |
290 | * This is the allocate id function. It should be called with any | 291 | * This is the allocate id function. It should be called with any |
291 | * required locks. | 292 | * required locks. |
292 | * | 293 | * |
293 | * If memory is required, it will return -EAGAIN, you should unlock | 294 | * If allocation from IDR's private freelist fails, idr_get_new_above() will |
294 | * and go back to the idr_pre_get() call. If the idr is full, it will | 295 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill |
295 | * return -ENOSPC. | 296 | * IDR's preallocation and then retry the idr_get_new_above() call. |
297 | * | ||
298 | * If the idr is full idr_get_new_above() will return %-ENOSPC. | ||
296 | * | 299 | * |
297 | * @id returns a value in the range @starting_id ... 0x7fffffff | 300 | * @id returns a value in the range @starting_id ... %0x7fffffff |
298 | */ | 301 | */ |
299 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | 302 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
300 | { | 303 | { |
@@ -318,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above); | |||
318 | * @ptr: pointer you want associated with the id | 321 | * @ptr: pointer you want associated with the id |
319 | * @id: pointer to the allocated handle | 322 | * @id: pointer to the allocated handle |
320 | * | 323 | * |
321 | * This is the allocate id function. It should be called with any | 324 | * If allocation from IDR's private freelist fails, idr_get_new_above() will |
322 | * required locks. | 325 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill |
326 | * IDR's preallocation and then retry the idr_get_new_above() call. | ||
323 | * | 327 | * |
324 | * If memory is required, it will return -EAGAIN, you should unlock | 328 | * If the idr is full idr_get_new_above() will return %-ENOSPC. |
325 | * and go back to the idr_pre_get() call. If the idr is full, it will | ||
326 | * return -ENOSPC. | ||
327 | * | 329 | * |
328 | * @id returns a value in the range 0 ... 0x7fffffff | 330 | * @id returns a value in the range %0 ... %0x7fffffff |
329 | */ | 331 | */ |
330 | int idr_get_new(struct idr *idp, void *ptr, int *id) | 332 | int idr_get_new(struct idr *idp, void *ptr, int *id) |
331 | { | 333 | { |
@@ -388,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
388 | } | 390 | } |
389 | 391 | ||
390 | /** | 392 | /** |
391 | * idr_remove - remove the given id and free it's slot | 393 | * idr_remove - remove the given id and free its slot |
392 | * @idp: idr handle | 394 | * @idp: idr handle |
393 | * @id: unique key | 395 | * @id: unique key |
394 | */ | 396 | */ |
@@ -437,7 +439,7 @@ EXPORT_SYMBOL(idr_remove); | |||
437 | * function will remove all id mappings and leave all idp_layers | 439 | * function will remove all id mappings and leave all idp_layers |
438 | * unused. | 440 | * unused. |
439 | * | 441 | * |
440 | * A typical clean-up sequence for objects stored in an idr tree, will | 442 | * A typical clean-up sequence for objects stored in an idr tree will |
441 | * use idr_for_each() to free all objects, if necessary, then | 443 | * use idr_for_each() to free all objects, if necessary, then |
442 | * idr_remove_all() to remove all ids, and idr_destroy() to free | 444 | * idr_remove_all() to remove all ids, and idr_destroy() to free |
443 | * up the cached idr_layers. | 445 | * up the cached idr_layers. |
@@ -479,7 +481,7 @@ EXPORT_SYMBOL(idr_remove_all); | |||
479 | 481 | ||
480 | /** | 482 | /** |
481 | * idr_destroy - release all cached layers within an idr tree | 483 | * idr_destroy - release all cached layers within an idr tree |
482 | * idp: idr handle | 484 | * @idp: idr handle |
483 | */ | 485 | */ |
484 | void idr_destroy(struct idr *idp) | 486 | void idr_destroy(struct idr *idp) |
485 | { | 487 | { |
@@ -542,7 +544,7 @@ EXPORT_SYMBOL(idr_find); | |||
542 | * not allowed. | 544 | * not allowed. |
543 | * | 545 | * |
544 | * We check the return of @fn each time. If it returns anything other | 546 | * We check the return of @fn each time. If it returns anything other |
545 | * than 0, we break out and return that value. | 547 | * than %0, we break out and return that value. |
546 | * | 548 | * |
547 | * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). | 549 | * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). |
548 | */ | 550 | */ |
@@ -586,10 +588,11 @@ EXPORT_SYMBOL(idr_for_each); | |||
586 | /** | 588 | /** |
587 | * idr_get_next - lookup next object of id to given id. | 589 | * idr_get_next - lookup next object of id to given id. |
588 | * @idp: idr handle | 590 | * @idp: idr handle |
589 | * @id: pointer to lookup key | 591 | * @nextidp: pointer to lookup key |
590 | * | 592 | * |
591 | * Returns pointer to registered object with id, which is next number to | 593 | * Returns pointer to registered object with id, which is next number to |
592 | * given id. | 594 | * given id. After being looked up, *@nextidp will be updated for the next |
595 | * iteration. | ||
593 | */ | 596 | */ |
594 | 597 | ||
595 | void *idr_get_next(struct idr *idp, int *nextidp) | 598 | void *idr_get_next(struct idr *idp, int *nextidp) |
@@ -636,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next); | |||
636 | * @id: lookup key | 639 | * @id: lookup key |
637 | * | 640 | * |
638 | * Replace the pointer registered with an id and return the old value. | 641 | * Replace the pointer registered with an id and return the old value. |
639 | * A -ENOENT return indicates that @id was not found. | 642 | * A %-ENOENT return indicates that @id was not found. |
640 | * A -EINVAL return indicates that @id was not within valid constraints. | 643 | * A %-EINVAL return indicates that @id was not within valid constraints. |
641 | * | 644 | * |
642 | * The caller must serialize with writers. | 645 | * The caller must serialize with writers. |
643 | */ | 646 | */ |
@@ -695,10 +698,11 @@ void idr_init(struct idr *idp) | |||
695 | EXPORT_SYMBOL(idr_init); | 698 | EXPORT_SYMBOL(idr_init); |
696 | 699 | ||
697 | 700 | ||
698 | /* | 701 | /** |
702 | * DOC: IDA description | ||
699 | * IDA - IDR based ID allocator | 703 | * IDA - IDR based ID allocator |
700 | * | 704 | * |
701 | * this is id allocator without id -> pointer translation. Memory | 705 | * This is id allocator without id -> pointer translation. Memory |
702 | * usage is much lower than full blown idr because each id only | 706 | * usage is much lower than full blown idr because each id only |
703 | * occupies a bit. ida uses a custom leaf node which contains | 707 | * occupies a bit. ida uses a custom leaf node which contains |
704 | * IDA_BITMAP_BITS slots. | 708 | * IDA_BITMAP_BITS slots. |
@@ -731,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) | |||
731 | * following function. It preallocates enough memory to satisfy the | 735 | * following function. It preallocates enough memory to satisfy the |
732 | * worst possible allocation. | 736 | * worst possible allocation. |
733 | * | 737 | * |
734 | * If the system is REALLY out of memory this function returns 0, | 738 | * If the system is REALLY out of memory this function returns %0, |
735 | * otherwise 1. | 739 | * otherwise %1. |
736 | */ | 740 | */ |
737 | int ida_pre_get(struct ida *ida, gfp_t gfp_mask) | 741 | int ida_pre_get(struct ida *ida, gfp_t gfp_mask) |
738 | { | 742 | { |
@@ -758,17 +762,17 @@ EXPORT_SYMBOL(ida_pre_get); | |||
758 | /** | 762 | /** |
759 | * ida_get_new_above - allocate new ID above or equal to a start id | 763 | * ida_get_new_above - allocate new ID above or equal to a start id |
760 | * @ida: ida handle | 764 | * @ida: ida handle |
761 | * @staring_id: id to start search at | 765 | * @starting_id: id to start search at |
762 | * @p_id: pointer to the allocated handle | 766 | * @p_id: pointer to the allocated handle |
763 | * | 767 | * |
764 | * Allocate new ID above or equal to @starting_id. It should be called with | 768 | * Allocate new ID above or equal to @starting_id. It should be called with |
765 | * any required locks. | 769 | * any required locks. |
766 | * | 770 | * |
767 | * If memory is required, it will return -EAGAIN, you should unlock | 771 | * If memory is required, it will return %-EAGAIN, you should unlock |
768 | * and go back to the ida_pre_get() call. If the ida is full, it will | 772 | * and go back to the ida_pre_get() call. If the ida is full, it will |
769 | * return -ENOSPC. | 773 | * return %-ENOSPC. |
770 | * | 774 | * |
771 | * @p_id returns a value in the range @starting_id ... 0x7fffffff. | 775 | * @p_id returns a value in the range @starting_id ... %0x7fffffff. |
772 | */ | 776 | */ |
773 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 777 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
774 | { | 778 | { |
@@ -850,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above); | |||
850 | * | 854 | * |
851 | * Allocate new ID. It should be called with any required locks. | 855 | * Allocate new ID. It should be called with any required locks. |
852 | * | 856 | * |
853 | * If memory is required, it will return -EAGAIN, you should unlock | 857 | * If memory is required, it will return %-EAGAIN, you should unlock |
854 | * and go back to the idr_pre_get() call. If the idr is full, it will | 858 | * and go back to the idr_pre_get() call. If the idr is full, it will |
855 | * return -ENOSPC. | 859 | * return %-ENOSPC. |
856 | * | 860 | * |
857 | * @id returns a value in the range 0 ... 0x7fffffff. | 861 | * @id returns a value in the range %0 ... %0x7fffffff. |
858 | */ | 862 | */ |
859 | int ida_get_new(struct ida *ida, int *p_id) | 863 | int ida_get_new(struct ida *ida, int *p_id) |
860 | { | 864 | { |
@@ -912,7 +916,7 @@ EXPORT_SYMBOL(ida_remove); | |||
912 | 916 | ||
913 | /** | 917 | /** |
914 | * ida_destroy - release all cached layers within an ida tree | 918 | * ida_destroy - release all cached layers within an ida tree |
915 | * ida: ida handle | 919 | * @ida: ida handle |
916 | */ | 920 | */ |
917 | void ida_destroy(struct ida *ida) | 921 | void ida_destroy(struct ida *ida) |
918 | { | 922 | { |