| author | Tejun Heo <htejun@gmail.com> | 2007-06-13 14:45:13 -0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@suse.de> | 2007-07-11 19:09:03 -0400 |
| commit | 72dba584b695d8bc8c1a50ed54ad4cba7c62314d | |
| tree | b0938ea773953f869b22101bb021e5710d0a0fec /lib | |
| parent | e33ac8bdb0c84fe7afd2c45537b763faf28c589e | |
ida: implement idr based id allocator
Implement an idr-based id allocator. ida is used the same way idr is
used, but it lacks the id -> ptr translation and thus consumes much
less memory. struct ida_bitmap instances are attached as leaf nodes to
the idr tree, which is managed by the idr code. Each ida_bitmap is
128 bytes long and contains slightly less than a thousand slots.
ida is more aggressive about releasing the extra resources acquired
through ida_pre_get(): after every successful id allocation, ida frees
one reserved idr_layer if possible. A reserved ida_bitmap is not freed
automatically, but only one ida_bitmap is ever reserved and it is
almost always used right away. Under most circumstances, ida therefore
won't hold on for long to memory that isn't actively used.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
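For reference, the declarations this code relies on live in include/linux/idr.h, which the 'lib'-limited diffstat below does not show. The following is a sketch reconstructed from the identifiers used in the diff (struct ida, struct ida_bitmap, free_bitmap, nr_busy, IDA_BITMAP_BITS); the exact constants and field layout are assumptions, not quotations from the patch:

```c
/* Presumed include/linux/idr.h additions -- reconstructed, not quoted. */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per bitmap leaf */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	long			nr_busy;	/* number of set bits in this leaf */
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct idr		idr;		/* tree machinery reused from idr */
	struct ida_bitmap	*free_bitmap;	/* the single reserved spare leaf */
};
```

Under this layout the commit message's numbers work out: one long is spent on nr_busy, leaving 31 * 32 = 992 usable bits per 128-byte leaf on 32-bit kernels and 15 * 64 = 960 on 64-bit ones, i.e. slightly less than a thousand slots.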
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/idr.c | 245 |
1 file changed, 245 insertions, 0 deletions
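Before diving into the diff, it may help to see how the new API is meant to be driven. The kerneldoc below prescribes a preallocate/lock/allocate/retry protocol; here is a minimal caller sketch following it, where my_ida, my_lock, and my_alloc_id are illustrative names and not part of the patch:

```c
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct ida my_ida;		/* set up once via ida_init(&my_ida) */
static DEFINE_SPINLOCK(my_lock);	/* whatever lock the caller already uses */

int my_alloc_id(void)
{
	int id, ret;

again:
	/* Reserve memory outside the lock; with GFP_KERNEL this may sleep. */
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&my_lock);
	ret = ida_get_new(&my_ida, &id);
	spin_unlock(&my_lock);

	if (ret == -EAGAIN)
		goto again;	/* reservation consumed by a racer; replenish and retry */
	if (ret)
		return ret;	/* -ENOSPC: the 0 ... 0x7fffffff id space is exhausted */

	return id;
}
```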
```diff
@@ -506,3 +506,248 @@ void idr_init(struct idr *idp)
 	spin_lock_init(&idp->lock);
 }
 EXPORT_SYMBOL(idr_init);
+
+
+/*
+ * IDA - IDR based ID allocator
+ *
+ * This is an id allocator without id -> pointer translation.  Memory
+ * usage is much lower than full blown idr because each id only
+ * occupies a bit.  ida uses a custom leaf node which contains
+ * IDA_BITMAP_BITS slots.
+ *
+ * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
+ */
+
+static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
+{
+	unsigned long flags;
+
+	if (!ida->free_bitmap) {
+		spin_lock_irqsave(&ida->idr.lock, flags);
+		if (!ida->free_bitmap) {
+			ida->free_bitmap = bitmap;
+			bitmap = NULL;
+		}
+		spin_unlock_irqrestore(&ida->idr.lock, flags);
+	}
+
+	kfree(bitmap);
+}
+
+/**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida:	ida handle
+ * @gfp_mask:	memory allocation flags
+ *
+ * This function should be called prior to locking and calling the
+ * following function.  It preallocates enough memory to satisfy the
+ * worst possible allocation.
+ *
+ * If the system is REALLY out of memory this function returns 0,
+ * otherwise 1.
+ */
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
+{
+	/* allocate idr_layers */
+	if (!idr_pre_get(&ida->idr, gfp_mask))
+		return 0;
+
+	/* allocate free_bitmap */
+	if (!ida->free_bitmap) {
+		struct ida_bitmap *bitmap;
+
+		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
+		if (!bitmap)
+			return 0;
+
+		free_bitmap(ida, bitmap);
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(ida_pre_get);
+
+/**
+ * ida_get_new_above - allocate new ID above or equal to a start id
+ * @ida:	ida handle
+ * @starting_id: id to start search at
+ * @p_id:	pointer to the allocated handle
+ *
+ * Allocate new ID above or equal to @starting_id.  It should be
+ * called with any required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call.  If the ida is full, it will
+ * return -ENOSPC.
+ *
+ * @p_id returns a value in the range 0 ... 0x7fffffff.
+ */
+int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+{
+	struct idr_layer *pa[MAX_LEVEL];
+	struct ida_bitmap *bitmap;
+	unsigned long flags;
+	int idr_id = starting_id / IDA_BITMAP_BITS;
+	int offset = starting_id % IDA_BITMAP_BITS;
+	int t, id;
+
+ restart:
+	/* get vacant slot */
+	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+	if (t < 0) {
+		if (t == -1)
+			return -EAGAIN;
+		else /* will be -3 */
+			return -ENOSPC;
+	}
+
+	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+		return -ENOSPC;
+
+	if (t != idr_id)
+		offset = 0;
+	idr_id = t;
+
+	/* if bitmap isn't there, create a new one */
+	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
+	if (!bitmap) {
+		spin_lock_irqsave(&ida->idr.lock, flags);
+		bitmap = ida->free_bitmap;
+		ida->free_bitmap = NULL;
+		spin_unlock_irqrestore(&ida->idr.lock, flags);
+
+		if (!bitmap)
+			return -EAGAIN;
+
+		memset(bitmap, 0, sizeof(struct ida_bitmap));
+		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+		pa[0]->count++;
+	}
+
+	/* look up an empty slot */
+	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
+	if (t == IDA_BITMAP_BITS) {
+		/* no empty slot after offset, continue to the next chunk */
+		idr_id++;
+		offset = 0;
+		goto restart;
+	}
+
+	id = idr_id * IDA_BITMAP_BITS + t;
+	if (id >= MAX_ID_BIT)
+		return -ENOSPC;
+
+	__set_bit(t, bitmap->bitmap);
+	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
+		idr_mark_full(pa, idr_id);
+
+	*p_id = id;
+
+	/* Each leaf node can handle nearly a thousand slots and the
+	 * whole idea of ida is to have a small memory footprint.
+	 * Throw away extra resources one by one after each successful
+	 * allocation.
+	 */
+	if (ida->idr.id_free_cnt || ida->free_bitmap) {
+		struct idr_layer *p = alloc_layer(&ida->idr);
+		if (p)
+			kmem_cache_free(idr_layer_cache, p);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ida_get_new_above);
+
+/**
+ * ida_get_new - allocate new ID
+ * @ida:	ida handle
+ * @p_id:	pointer to the allocated handle
+ *
+ * Allocate new ID.  It should be called with any required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call.  If the ida is full, it will
+ * return -ENOSPC.
+ *
+ * @p_id returns a value in the range 0 ... 0x7fffffff.
+ */
+int ida_get_new(struct ida *ida, int *p_id)
+{
+	return ida_get_new_above(ida, 0, p_id);
+}
+EXPORT_SYMBOL(ida_get_new);
+
+/**
+ * ida_remove - remove the given ID
+ * @ida:	ida handle
+ * @id:		ID to free
+ */
+void ida_remove(struct ida *ida, int id)
+{
+	struct idr_layer *p = ida->idr.top;
+	int shift = (ida->idr.layers - 1) * IDR_BITS;
+	int idr_id = id / IDA_BITMAP_BITS;
+	int offset = id % IDA_BITMAP_BITS;
+	int n;
+	struct ida_bitmap *bitmap;
+
+	/* clear full bits while looking up the leaf idr_layer */
+	while ((shift > 0) && p) {
+		n = (idr_id >> shift) & IDR_MASK;
+		__clear_bit(n, &p->bitmap);
+		p = p->ary[n];
+		shift -= IDR_BITS;
+	}
+
+	if (p == NULL)
+		goto err;
+
+	n = idr_id & IDR_MASK;
+	__clear_bit(n, &p->bitmap);
+
+	bitmap = (void *)p->ary[n];
+	if (!test_bit(offset, bitmap->bitmap))
+		goto err;
+
+	/* update bitmap and remove it if empty */
+	__clear_bit(offset, bitmap->bitmap);
+	if (--bitmap->nr_busy == 0) {
+		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
+		idr_remove(&ida->idr, idr_id);
+		free_bitmap(ida, bitmap);
+	}
+
+	return;
+
+ err:
+	printk(KERN_WARNING
+	       "ida_remove called for id=%d which is not allocated.\n", id);
+}
+EXPORT_SYMBOL(ida_remove);
+
+/**
+ * ida_destroy - release all cached layers within an ida tree
+ * @ida:	ida handle
+ */
+void ida_destroy(struct ida *ida)
+{
+	idr_destroy(&ida->idr);
+	kfree(ida->free_bitmap);
+}
+EXPORT_SYMBOL(ida_destroy);
+
+/**
+ * ida_init - initialize ida handle
+ * @ida:	ida handle
+ *
+ * This function is used to set up the handle (@ida) that you will
+ * pass to the rest of the functions.
+ */
+void ida_init(struct ida *ida)
+{
+	memset(ida, 0, sizeof(struct ida));
+	idr_init(&ida->idr);
+
+}
+EXPORT_SYMBOL(ida_init);
```
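Rounding out the illustrative caller sketched before the diff, the freeing side uses the same caller-provided lock, and teardown goes through ida_destroy(); my_free_id and my_exit are again hypothetical names:

```c
/* Release an id handed out by my_alloc_id(); same caller-side locking. */
void my_free_id(int id)
{
	spin_lock(&my_lock);
	ida_remove(&my_ida, id);
	spin_unlock(&my_lock);
}

/* On teardown, drop the cached idr layers and the spare ida_bitmap. */
void my_exit(void)
{
	ida_destroy(&my_ida);
}
```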
