Diffstat (limited to 'lib/idr.c')

 -rw-r--r--  lib/idr.c | 96
 1 file changed, 33 insertions(+), 63 deletions(-)
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache.  We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones.  As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context.  See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
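[Annotation: the fallback ordering above matters because preloaded users allocate under a lock with GFP_NOWAIT and are expected to dip into the per-cpu buffer, so a failed first attempt is normal there and must not warn. A minimal sketch of that usage pattern follows; my_idr, my_lock and new_entry are placeholder names, not part of this patch.]

	int id;

	/* Preload outside the lock, then allocate atomically under it. */
	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);

	/* May fall back to the preload buffer when GFP_NOWAIT fails. */
	id = idr_alloc(&my_idr, new_entry, 0, 0, GFP_NOWAIT);

	spin_unlock(&my_lock);
	idr_preload_end();

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */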
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp:	idr handle
- * @gfp_mask:	memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
  * @starting_id: id to start search at
- * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  * @gfp_mask: allocation mask for idr_layer_alloc()
  * @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
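[Annotation: both renamed helpers keep their old semantics; only the public names are being underscored out of the way. For reference, the retry convention the removed kernel-doc described looks roughly like this; my_idr, my_lock and ptr are placeholders, and the sketch uses the pre-rename public names.]

	int id, err;

retry:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;		/* really out of memory */
	spin_lock(&my_lock);
	err = idr_get_new_above(&my_idr, ptr, 1, &id);
	spin_unlock(&my_lock);
	if (err == -EAGAIN)
		goto retry;		/* refill preallocation, then retry */
	else if (err)
		return err;		/* -ENOSPC: idr is full */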
@@ -569,8 +549,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *p;
 	struct idr_layer *to_free;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
@@ -667,15 +646,7 @@ void *idr_find_slowpath(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	/*
-	 * If @id is negative, idr_find() used to ignore the sign bit and
-	 * performed lookup with the rest of bits, which is weird and can
-	 * lead to very obscure bugs.  We're now returning NULL for all
-	 * negative IDs but just in case somebody was depending on the sign
-	 * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
-	 * be detected and fixed.  WARN_ON_ONCE() can later be removed.
-	 */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return NULL;
 
 	p = rcu_dereference_raw(idp->top);
@@ -824,8 +795,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return ERR_PTR(-EINVAL);
 
 	p = idp->top;
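[Annotation: with the WARN_ON_ONCE() transition period described in the removed idr_find_slowpath() comment over, negative IDs are now rejected silently in all three entry points. Assuming placeholder my_idr and ptr, the resulting behavior is:]

	void *p;

	p = idr_find(&my_idr, -1);		/* quietly returns NULL */
	idr_remove(&my_idr, -1);		/* quietly does nothing */
	p = idr_replace(&my_idr, ptr, -1);	/* ERR_PTR(-EINVAL), no warning */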
@@ -918,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
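[Annotation: ida_pre_get() keeps its public name and calling convention; it merely follows the rename for the idr_layer preallocation it borrows from the idr side. For contrast, a sketch of the unchanged IDA pattern of that era; my_ida is a placeholder.]

	int id, err;

again:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;
	err = ida_get_new(&my_ida, &id);
	if (err == -EAGAIN)
		goto again;	/* preallocation consumed; refill and retry */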