-rw-r--r--  Documentation/DocBook/kernel-api.tmpl |  6
-rw-r--r--  include/linux/idr.h                   |  1
-rw-r--r--  lib/idr.c                             | 49
3 files changed, 32 insertions, 24 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 6b4e07f28b69..7160652a8736 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -93,6 +93,12 @@ X!Ilib/string.c
 !Elib/crc32.c
 !Elib/crc-ccitt.c
 </sect1>
+
+<sect1 id="idr"><title>idr/ida Functions</title>
+!Pinclude/linux/idr.h idr sync
+!Plib/idr.c IDA description
+!Elib/idr.c
+</sect1>
 </chapter>
 
 <chapter id="mm">
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 928ae712709f..13a801f3d028 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -81,6 +81,7 @@ struct idr {
 #define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
 
 /**
+ * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
  *
  * idr_find() is able to be called locklessly, using RCU. The caller must
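The "idr sync" comment exported above documents that idr_find() may be called locklessly as long as the lookup runs inside an RCU read-side critical section. A minimal sketch of that lookup pattern, assuming a hypothetical object type struct my_obj stored in the idr:

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

struct my_obj {
	int value;
};

/* Lockless lookup: only dereference the result inside the RCU read section. */
static int my_obj_peek_value(struct idr *idp, int id)
{
	struct my_obj *obj;
	int ret = -ENOENT;

	rcu_read_lock();
	obj = idr_find(idp, id);
	if (obj)
		ret = obj->value;
	rcu_read_unlock();

	return ret;
}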
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,7 +106,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 }
 
 /**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
  * @idp: idr handle
  * @gfp_mask: memory allocation flags
  *
@@ -115,8 +115,8 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  * caller should pass in GFP_KERNEL if possible. This of course requires that
  * no spinning locks be held.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
@@ -292,12 +292,12 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  * required locks.
  *
  * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return -EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
  * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * If the idr is full idr_get_new_above() will return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * @id returns a value in the range @starting_id ... %0x7fffffff
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
@@ -322,12 +322,12 @@ EXPORT_SYMBOL(idr_get_new_above);
  * @id: pointer to the allocated handle
  *
  * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return -EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
  * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * If the idr is full idr_get_new_above() will return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
  */
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
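Both idr_get_new() and idr_get_new_above() rely on the two-step protocol described in the comments above: preallocate with idr_pre_get() while no spinlocks are held, allocate under the caller's own lock, and loop back on -EAGAIN. A minimal sketch of that protocol, assuming hypothetical my_idr and my_lock instances and ids starting at 1:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idr);		/* hypothetical idr instance */
static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock serializing writers */

static int my_alloc_id(void *ptr, int *id)
{
	int ret;

	do {
		/* May sleep with GFP_KERNEL, so no spinlocks may be held here. */
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;

		spin_lock(&my_lock);
		ret = idr_get_new_above(&my_idr, ptr, 1, id);
		spin_unlock(&my_lock);
	} while (ret == -EAGAIN);

	return ret;	/* 0 on success, -ENOSPC if the idr is full */
}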
@@ -390,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 }
 
 /**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
  * @idp: idr handle
  * @id: unique key
  */
@@ -439,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
  * function will remove all id mappings and leave all idp_layers
  * unused.
  *
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
  * use idr_for_each() to free all objects, if necessay, then
  * idr_remove_all() to remove all ids, and idr_destroy() to free
  * up the cached idr_layers.
@@ -544,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
  * not allowed.
  *
  * We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
  *
  * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
  */
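The clean-up sequence and the idr_for_each() callback contract described in the two hunks above can be combined into a small teardown helper. A minimal sketch, assuming the stored objects were allocated with kmalloc() and that the caller already serializes against idr_get_new() and idr_remove():

#include <linux/idr.h>
#include <linux/slab.h>

/* Callback for idr_for_each(): free one object; returning 0 keeps iterating. */
static int my_free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void my_idr_teardown(struct idr *idp)
{
	idr_for_each(idp, my_free_one, NULL);	/* free all stored objects */
	idr_remove_all(idp);			/* drop every id mapping */
	idr_destroy(idp);			/* release the cached idr_layers */
}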
@@ -639,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next);
  * @id: lookup key
  *
  * Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
  *
  * The caller must serialize with writers.
  */
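As documented above, idr_replace() hands back the old pointer with any error encoded in the return value. A minimal sketch of checking that return, assuming the errors come back as ERR_PTR() values and that the old objects are kmalloc()-allocated:

#include <linux/idr.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Swap the object stored at @id; the caller must serialize with writers. */
static int my_replace(struct idr *idp, int id, void *new_obj)
{
	void *old;

	old = idr_replace(idp, new_obj, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL, as documented */

	kfree(old);			/* hypothetical: old objects were kmalloc()ed */
	return 0;
}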
@@ -698,10 +698,11 @@ void idr_init(struct idr *idp)
 EXPORT_SYMBOL(idr_init);
 
 
-/*
+/**
+ * DOC: IDA description
  * IDA - IDR based ID allocator
  *
- * this is id allocator without id -> pointer translation. Memory
+ * This is id allocator without id -> pointer translation. Memory
  * usage is much lower than full blown idr because each id only
  * occupies a bit. ida uses a custom leaf node which contains
  * IDA_BITMAP_BITS slots.
@@ -734,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
  * following function. It preallocates enough memory to satisfy the
  * worst possible allocation.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
@@ -767,11 +768,11 @@ EXPORT_SYMBOL(ida_pre_get);
  * Allocate new ID above or equal to @ida. It should be called with
  * any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
  * and go back to the ida_pre_get() call. If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
  *
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
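ida_get_new_above() follows the same preallocate-then-retry protocol as the idr allocation functions, just without an associated pointer. A minimal sketch, assuming hypothetical my_ida and my_ida_lock instances and ids starting at 1:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDA(my_ida);		/* hypothetical ida instance */
static DEFINE_SPINLOCK(my_ida_lock);	/* hypothetical lock serializing writers */

static int my_alloc_ida_id(int *p_id)
{
	int ret;

	do {
		/* Preallocate outside the lock; returns 0 only when really OOM. */
		if (!ida_pre_get(&my_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock(&my_ida_lock);
		ret = ida_get_new_above(&my_ida, 1, p_id);
		spin_unlock(&my_ida_lock);
	} while (ret == -EAGAIN);

	return ret;	/* 0 on success, -ENOSPC when the ida is full */
}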
@@ -853,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
  *
  * Allocate new ID. It should be called with any required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
  * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @id returns a value in the range %0 ... %0x7fffffff.
  */
 int ida_get_new(struct ida *ida, int *p_id)
 {
