author	Tejun Heo <tj@kernel.org>	2013-02-27 20:05:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 22:10:21 -0500
commit	0ffc2a9c8072969253a20821c2c733a2cbb4c7c7 (patch)
tree	ff6a9d270d4f0fa5c770837c6d9e5d5aac2637a7 /lib/idr.c
parent	54616283c2948812a44240858ced610e7cacbde1 (diff)
idr: implement lookup hint
While idr lookup isn't a particularly heavy operation, it is still too
substantial to use in hot paths without worrying about the performance
implications.  With recent changes, each idr_layer covers 256 slots, which
should be enough to cover most use cases with a single idr_layer, making a
lookup hint very attractive.

This patch adds idr->hint, which points to the idr_layer that most recently
allocated an ID, and the fast path lookup becomes

	if (lookup target's prefix matches that of the hinted layer)
		return hint->ary[ID's offset in the leaf layer];

which can be inlined.

idr->hint is set to the leaf node in idr_fill_slot() and cleared from
free_layer().

[andriy.shevchenko@linux.intel.com: always do slow path when hint is uninitialized]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
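The inlined fast path itself lives on the header side of the patch
(include/linux/idr.h) and is therefore outside this diffstat.  A minimal
sketch of what it looks like, assuming the hinted leaf layer caches its ID
prefix in a ->prefix field and that the old tree walk is kept around as
idr_find_slowpath() (as the diff below renames it):

	static inline void *idr_find(struct idr *idr, int id)
	{
		struct idr_layer *hint = rcu_dereference_raw(idr->hint);

		/* fast path: the hinted leaf layer covers this ID's prefix */
		if (hint && (id & ~IDR_MASK) == hint->prefix)
			return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

		/* slow path: fall back to the full tree walk */
		return idr_find_slowpath(idr, id);
	}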
Diffstat (limited to 'lib/idr.c')
-rw-r--r--	lib/idr.c	38
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 5cd602936645..1a30272066c6 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -137,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head)
 	kmem_cache_free(idr_layer_cache, layer);
 }
 
-static inline void free_layer(struct idr_layer *p)
+static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
+	if (idr->hint && idr->hint == p)
+		RCU_INIT_POINTER(idr->hint, NULL);
 	call_rcu(&p->rcu_head, idr_layer_rcu_free);
 }
 
@@ -363,8 +365,12 @@ build_up:
  * @id and @pa are from a successful allocation from idr_get_empty_slot().
  * Install the user pointer @ptr and mark the slot full.
  */
-static void idr_fill_slot(void *ptr, int id, struct idr_layer **pa)
+static void idr_fill_slot(struct idr *idr, void *ptr, int id,
+			  struct idr_layer **pa)
 {
+	/* update hint used for lookup, cleared from free_layer() */
+	rcu_assign_pointer(idr->hint, pa[0]);
+
 	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
 	pa[0]->count++;
 	idr_mark_full(pa, id);
@@ -397,7 +403,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	if (rv < 0)
 		return rv == -ENOMEM ? -EAGAIN : rv;
 
-	idr_fill_slot(ptr, rv, pa);
+	idr_fill_slot(idp, ptr, rv, pa);
 	*id = rv;
 	return 0;
 }
@@ -504,7 +510,7 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 	if (unlikely(id > max))
 		return -ENOSPC;
 
-	idr_fill_slot(ptr, id, pa);
+	idr_fill_slot(idr, ptr, id, pa);
 	return id;
 }
 EXPORT_SYMBOL_GPL(idr_alloc);
@@ -541,14 +547,14 @@ static void sub_remove(struct idr *idp, int shift, int id)
 		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
 			if (to_free)
-				free_layer(to_free);
+				free_layer(idp, to_free);
 			to_free = **paa;
 			**paa-- = NULL;
 		}
 		if (!*paa)
 			idp->layers = 0;
 		if (to_free)
-			free_layer(to_free);
+			free_layer(idp, to_free);
 	} else
 		idr_remove_warning(id);
 }
@@ -581,7 +587,7 @@ void idr_remove(struct idr *idp, int id)
 		--idp->layers;
 		to_free->count = 0;
 		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
-		free_layer(to_free);
+		free_layer(idp, to_free);
 	}
 	while (idp->id_free_cnt >= MAX_IDR_FREE) {
 		p = get_from_free_list(idp);
@@ -622,7 +628,7 @@ void __idr_remove_all(struct idr *idp)
 		/* Get the highest bit that the above add changed from 0->1. */
 		while (n < fls(id ^ bt_mask)) {
 			if (p)
-				free_layer(p);
+				free_layer(idp, p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
@@ -655,19 +661,7 @@ void idr_destroy(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_destroy);
 
-/**
- * idr_find - return pointer for given id
- * @idp: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with.  A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
- */
-void *idr_find(struct idr *idp, int id)
+void *idr_find_slowpath(struct idr *idp, int id)
 {
 	int n;
 	struct idr_layer *p;
@@ -691,7 +685,7 @@ void *idr_find(struct idr *idp, int id)
 	}
 	return((void *)p);
 }
-EXPORT_SYMBOL(idr_find);
+EXPORT_SYMBOL(idr_find_slowpath);
 
 /**
  * idr_for_each - iterate through all stored pointers