author		Tejun Heo <tj@kernel.org>	2013-02-27 20:03:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 22:10:14 -0500
commit		d5c7409f79e14db49d00785692334657592c07ff (patch)
tree		9646f4a2934cf4d221f173c17defa22b90585e28 /lib/idr.c
parent		3594eb2894f571c9b9a497159b1e4d84fdac5688 (diff)
idr: implement idr_preload[_end]() and idr_alloc()
The current idr interface is very cumbersome.

* For all allocations, two function calls - idr_pre_get() and
  idr_get_new*() - should be made.

* idr_pre_get() doesn't guarantee that the following idr_get_new*()
  will not fail from memory shortage.  If idr_get_new*() returns
  -EAGAIN, the caller is expected to retry pre_get and allocation.

* idr_get_new*() can't enforce an upper limit.  An upper limit can only
  be enforced by allocating and then freeing if above the limit.

* The idr_layer buffer is unnecessarily per-idr.  Each idr ends up
  keeping around MAX_IDR_FREE idr_layers.  The memory consumed per idr
  is under two pages, but it makes it difficult to make idr_layer
  larger.

This patch implements the following new set of allocation functions.

* idr_preload[_end]() - Similar to radix tree preload but doesn't fail.
  The first idr_alloc() inside the preload section can be treated as if
  it were called with the @gfp_mask used for idr_preload().

* idr_alloc() - Allocate an ID with lower and upper limits.  Takes
  @gfp_mask and can be used without preloading.  When used inside a
  preloaded section, the allocation mask of the preloading can be
  assumed.

If idr_alloc() can be called from a context which allows a sufficiently
relaxed @gfp_mask, it can be used by itself.  If, for example,
idr_alloc() is called inside a spinlock-protected region, preloading
can be used like the following:

	idr_preload(GFP_KERNEL);
	spin_lock(lock);

	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);

	spin_unlock(lock);
	idr_preload_end();

	if (id < 0)
		error;

which is much simpler and less error-prone than the idr_pre_get() and
idr_get_new*() loop.  The new interface uses a per-cpu idr_layer buffer
and thus the number of idrs in the system doesn't affect the amount of
memory used for preloading.

idr_layer_alloc() is introduced to handle idr_layer allocations for
both the old and new ID allocation paths.  This is a bit hairy now, but
the new interface is expected to replace the old one, and the internal
implementation will eventually become simpler.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
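To make the conversion concrete, here is a minimal sketch of a caller moving from the old idr_pre_get()/idr_get_new_above() retry loop to the new interface. The names my_idr, my_lock and my_attach_*() and the ID range are hypothetical and not part of this patch; only the idr_*() calls follow the interfaces described above.

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(my_idr);
	static DEFINE_SPINLOCK(my_lock);

	/* Old interface: pre_get/get_new retry loop, no upper limit. */
	static int my_attach_old(void *ptr)
	{
		int id, ret;

		do {
			if (!idr_pre_get(&my_idr, GFP_KERNEL))
				return -ENOMEM;
			spin_lock(&my_lock);
			ret = idr_get_new_above(&my_idr, ptr, 1, &id);
			spin_unlock(&my_lock);
		} while (ret == -EAGAIN);

		return ret ? ret : id;
	}

	/* New interface: preload outside the lock, allocate inside it. */
	static int my_attach_new(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock(&my_lock);
		/* IDs in [1, 128); returns -ENOSPC or -ENOMEM on failure. */
		id = idr_alloc(&my_idr, ptr, 1, 128, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();

		return id;
	}

Note how the upper bound and the retry handling, which the old interface left to the caller, are absorbed by idr_alloc() and idr_preload() respectively.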
Diffstat (limited to 'lib/idr.c')
-rw-r--r--	lib/idr.c	174
1 file changed, 166 insertions(+), 8 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index b13aae5bdc81..2d016f5c410e 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -35,8 +35,12 @@
 #include <linux/string.h>
 #include <linux/idr.h>
 #include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 static struct kmem_cache *idr_layer_cache;
+static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
+static DEFINE_PER_CPU(int, idr_preload_cnt);
 static DEFINE_SPINLOCK(simple_ida_lock);
 
 static struct idr_layer *get_from_free_list(struct idr *idp)
@@ -54,6 +58,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
 	return(p);
 }
 
+/**
+ * idr_layer_alloc - allocate a new idr_layer
+ * @gfp_mask: allocation mask
+ * @layer_idr: optional idr to allocate from
+ *
+ * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
+ * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
+ * an idr_layer from @layer_idr->id_free.
+ *
+ * @layer_idr is to maintain backward compatibility with the old alloc
+ * interface - idr_pre_get() and idr_get_new*() - and will be removed
+ * together with the per-idr preload buffer.
+ */
+static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+{
+	struct idr_layer *new;
+
+	/* this is the old path, bypass to get_from_free_list() */
+	if (layer_idr)
+		return get_from_free_list(layer_idr);
+
+	/* try to allocate directly from kmem_cache */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	if (new)
+		return new;
+
+	/*
+	 * Try to fetch one from the per-cpu preload buffer if in process
+	 * context.  See idr_preload() for details.
+	 */
+	if (in_interrupt())
+		return NULL;
+
+	preempt_disable();
+	new = __this_cpu_read(idr_preload_head);
+	if (new) {
+		__this_cpu_write(idr_preload_head, new->ary[0]);
+		__this_cpu_dec(idr_preload_cnt);
+		new->ary[0] = NULL;
+	}
+	preempt_enable();
+	return new;
+}
+
 static void idr_layer_rcu_free(struct rcu_head *head)
 {
 	struct idr_layer *layer;
@@ -139,6 +187,8 @@ EXPORT_SYMBOL(idr_pre_get);
  * @starting_id: id to start search at
  * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ * @gfp_mask: allocation mask for idr_layer_alloc()
+ * @layer_idr: optional idr passed to idr_layer_alloc()
  *
  * Allocate an id in range [@starting_id, INT_MAX] from @idp without
  * growing its depth.  Returns
@@ -148,7 +198,8 @@ EXPORT_SYMBOL(idr_pre_get);
  *  -ENOSPC if the id space is exhausted,
  *  -ENOMEM if more idr_layers need to be allocated.
  */
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+		     gfp_t gfp_mask, struct idr *layer_idr)
 {
 	int n, m, sh;
 	struct idr_layer *p, *new;
@@ -202,7 +253,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 		 * Create the layer below if it is missing.
 		 */
 		if (!p->ary[m]) {
-			new = get_from_free_list(idp);
+			new = idr_layer_alloc(gfp_mask, layer_idr);
 			if (!new)
 				return -ENOMEM;
 			new->layer = l-1;
@@ -218,7 +269,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 }
 
 static int idr_get_empty_slot(struct idr *idp, int starting_id,
-			      struct idr_layer **pa)
+			      struct idr_layer **pa, gfp_t gfp_mask,
+			      struct idr *layer_idr)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
@@ -229,7 +281,7 @@ build_up:
 	p = idp->top;
 	layers = idp->layers;
 	if (unlikely(!p)) {
-		if (!(p = get_from_free_list(idp)))
+		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
 			return -ENOMEM;
 		p->layer = 0;
 		layers = 1;
@@ -248,7 +300,7 @@ build_up:
 			p->layer++;
 			continue;
 		}
-		if (!(new = get_from_free_list(idp))) {
+		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
 			/*
 			 * The allocation failed.  If we built part of
 			 * the structure tear it down.
@@ -272,7 +324,7 @@ build_up:
 	}
 	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
-	v = sub_alloc(idp, &id, pa);
+	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
 	if (v == -EAGAIN)
 		goto build_up;
 	return(v);
@@ -312,7 +364,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	struct idr_layer *pa[MAX_IDR_LEVEL];
 	int rv;
 
-	rv = idr_get_empty_slot(idp, starting_id, pa);
+	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
 	if (rv < 0)
 		return rv == -ENOMEM ? -EAGAIN : rv;
 
@@ -322,6 +374,112 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 }
 EXPORT_SYMBOL(idr_get_new_above);
 
+/**
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
+ *
+ * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
+ * process context and each idr_preload() invocation should be matched with
+ * idr_preload_end().  Note that preemption is disabled while preloaded.
+ *
+ * The first idr_alloc() in the preloaded section can be treated as if it
+ * were invoked with @gfp_mask used for preloading.  This allows using more
+ * permissive allocation masks for idrs protected by spinlocks.
+ *
+ * For example, if idr_alloc() below fails, the failure can be treated as
+ * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
+ *
+ *	idr_preload(GFP_KERNEL);
+ *	spin_lock(lock);
+ *
+ *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
+ *
+ *	spin_unlock(lock);
+ *	idr_preload_end();
+ *	if (id < 0)
+ *		error;
+ */
+void idr_preload(gfp_t gfp_mask)
+{
+	/*
+	 * Consuming preload buffer from non-process context breaks preload
+	 * allocation guarantee.  Disallow usage from those contexts.
+	 */
+	WARN_ON_ONCE(in_interrupt());
+	might_sleep_if(gfp_mask & __GFP_WAIT);
+
+	preempt_disable();
+
+	/*
+	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+	 * return value from idr_alloc() needs to be checked for failure
+	 * anyway.  Silently give up if allocation fails.  The caller can
+	 * treat failures from idr_alloc() as if idr_alloc() were called
+	 * with @gfp_mask which should be enough.
+	 */
+	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+		struct idr_layer *new;
+
+		preempt_enable();
+		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+		preempt_disable();
+		if (!new)
+			break;
+
+		/* link the new one to per-cpu preload list */
+		new->ary[0] = __this_cpu_read(idr_preload_head);
+		__this_cpu_write(idr_preload_head, new);
+		__this_cpu_inc(idr_preload_cnt);
+	}
+}
+EXPORT_SYMBOL(idr_preload);
+
+/**
+ * idr_alloc - allocate new idr entry
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
+ * available in the specified range, returns -ENOSPC.  On memory allocation
+ * failure, returns -ENOMEM.
+ *
+ * Note that @end is treated as max when <= 0.  This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * The user is responsible for exclusively synchronizing all operations
+ * which may modify @idr.  However, read-only accesses such as idr_find()
+ * or iteration can be performed under RCU read lock provided the user
+ * destroys @ptr in RCU-safe way after removal from idr.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
+	struct idr_layer *pa[MAX_IDR_LEVEL];
+	int id;
+
+	might_sleep_if(gfp_mask & __GFP_WAIT);
+
+	/* sanity checks */
+	if (WARN_ON_ONCE(start < 0))
+		return -EINVAL;
+	if (unlikely(max < start))
+		return -ENOSPC;
+
+	/* allocate id */
+	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
+	if (unlikely(id < 0))
+		return id;
+	if (unlikely(id > max))
+		return -ENOSPC;
+
+	idr_fill_slot(ptr, id, pa);
+	return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
+
 static void idr_remove_warning(int id)
 {
 	printk(KERN_WARNING
@@ -769,7 +927,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 
 restart:
 	/* get vacant slot */
-	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
 	if (t < 0)
 		return t == -ENOMEM ? -EAGAIN : t;
 
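As a follow-up illustration of the locking rules spelled out in the idr_alloc() kerneldoc above, here is a minimal sketch of RCU-safe lookup and removal. The object type and names (struct my_obj, my_idr, my_lock) are hypothetical; idr_find(), idr_remove(), rcu_read_lock() and kfree_rcu() are existing kernel primitives, and the snippet assumes <linux/idr.h>, <linux/rcupdate.h> and <linux/spinlock.h>.

	struct my_obj {
		int id;
		struct rcu_head rcu;
	};

	/*
	 * Read side: idr_find() under rcu_read_lock(); the pointer is only
	 * valid inside the RCU section unless a reference is taken first.
	 */
	static bool my_obj_exists(int id)
	{
		bool found;

		rcu_read_lock();
		found = idr_find(&my_idr, id) != NULL;
		rcu_read_unlock();
		return found;
	}

	/*
	 * Write side: removal is serialized by my_lock; the object is freed
	 * only after an RCU grace period, as the kerneldoc requires.
	 */
	static void my_obj_release(struct my_obj *obj)
	{
		spin_lock(&my_lock);
		idr_remove(&my_idr, obj->id);
		spin_unlock(&my_lock);
		kfree_rcu(obj, rcu);
	}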