Diffstat (limited to 'lib')
 -rw-r--r--  lib/idr.c     | 332
 -rw-r--r--  lib/kobject.c |  19
 2 files changed, 318 insertions, 33 deletions
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -70,6 +70,26 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
 	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
+static void idr_mark_full(struct idr_layer **pa, int id)
+{
+	struct idr_layer *p = pa[0];
+	int l = 0;
+
+	__set_bit(id & IDR_MASK, &p->bitmap);
+	/*
+	 * If this layer is full mark the bit in the layer above to
+	 * show that this part of the radix tree is full.  This may
+	 * complete the layer above and require walking up the radix
+	 * tree.
+	 */
+	while (p->bitmap == IDR_FULL) {
+		if (!(p = pa[++l]))
+			break;
+		id = id >> IDR_BITS;
+		__set_bit((id & IDR_MASK), &p->bitmap);
+	}
+}
+
 /**
  * idr_pre_get - reserver resources for idr allocation
  * @idp: idr handle
@@ -95,15 +115,15 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_pre_get);
 
-static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 {
 	int n, m, sh;
 	struct idr_layer *p, *new;
-	struct idr_layer *pa[MAX_LEVEL];
-	int l, id;
+	int l, id, oid;
 	long bm;
 
 	id = *starting_id;
+ restart:
 	p = idp->top;
 	l = idp->layers;
 	pa[l--] = NULL;
@@ -117,12 +137,23 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
 		if (m == IDR_SIZE) {
 			/* no space available go back to previous layer. */
 			l++;
+			oid = id;
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
+
+			/* if already at the top layer, we need to grow */
 			if (!(p = pa[l])) {
 				*starting_id = id;
 				return -2;
 			}
-			continue;
+
+			/* If we need to go up one layer, continue the
+			 * loop; otherwise, restart from the top.
+			 */
+			sh = IDR_BITS * (l + 1);
+			if (oid >> sh == id >> sh)
+				continue;
+			else
+				goto restart;
 		}
 		if (m != n) {
 			sh = IDR_BITS*l;
@@ -144,30 +175,13 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
 		pa[l--] = p;
 		p = p->ary[m];
 	}
-	/*
-	 * We have reached the leaf node, plant the
-	 * users pointer and return the raw id.
-	 */
-	p->ary[m] = (struct idr_layer *)ptr;
-	__set_bit(m, &p->bitmap);
-	p->count++;
-	/*
-	 * If this layer is full mark the bit in the layer above
-	 * to show that this part of the radix tree is full.
-	 * This may complete the layer above and require walking
-	 * up the radix tree.
-	 */
-	n = id;
-	while (p->bitmap == IDR_FULL) {
-		if (!(p = pa[++l]))
-			break;
-		n = n >> IDR_BITS;
-		__set_bit((n & IDR_MASK), &p->bitmap);
-	}
-	return(id);
+
+	pa[l] = p;
+	return id;
 }
 
-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+static int idr_get_empty_slot(struct idr *idp, int starting_id,
+			      struct idr_layer **pa)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
@@ -213,12 +227,31 @@ build_up:
 	}
 	idp->top = p;
 	idp->layers = layers;
-	v = sub_alloc(idp, ptr, &id);
+	v = sub_alloc(idp, &id, pa);
 	if (v == -2)
 		goto build_up;
 	return(v);
 }
 
+static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+{
+	struct idr_layer *pa[MAX_LEVEL];
+	int id;
+
+	id = idr_get_empty_slot(idp, starting_id, pa);
+	if (id >= 0) {
+		/*
+		 * Successfully found an empty slot.  Install the user
+		 * pointer and mark the slot full.
+		 */
+		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+		pa[0]->count++;
+		idr_mark_full(pa, id);
+	}
+
+	return id;
+}
+
 /**
  * idr_get_new_above - allocate new idr entry above or equal to a start id
  * @idp: idr handle
@@ -473,3 +506,248 @@ void idr_init(struct idr *idp)
 	spin_lock_init(&idp->lock);
 }
 EXPORT_SYMBOL(idr_init);
+
+
+/*
+ * IDA - IDR based ID allocator
+ *
+ * this is id allocator without id -> pointer translation.  Memory
+ * usage is much lower than full blown idr because each id only
+ * occupies a bit.  ida uses a custom leaf node which contains
+ * IDA_BITMAP_BITS slots.
+ *
+ * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
+ */
+
+static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
+{
+	unsigned long flags;
+
+	if (!ida->free_bitmap) {
+		spin_lock_irqsave(&ida->idr.lock, flags);
+		if (!ida->free_bitmap) {
+			ida->free_bitmap = bitmap;
+			bitmap = NULL;
+		}
+		spin_unlock_irqrestore(&ida->idr.lock, flags);
+	}
+
+	kfree(bitmap);
+}
+
+/**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida: ida handle
+ * @gfp_mask: memory allocation flag
+ *
+ * This function should be called prior to locking and calling the
+ * following function.  It preallocates enough memory to satisfy the
+ * worst possible allocation.
+ *
+ * If the system is REALLY out of memory this function returns 0,
+ * otherwise 1.
+ */
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
+{
+	/* allocate idr_layers */
+	if (!idr_pre_get(&ida->idr, gfp_mask))
+		return 0;
+
+	/* allocate free_bitmap */
+	if (!ida->free_bitmap) {
+		struct ida_bitmap *bitmap;
+
+		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
+		if (!bitmap)
+			return 0;
+
+		free_bitmap(ida, bitmap);
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(ida_pre_get);
+
+/**
+ * ida_get_new_above - allocate new ID above or equal to a start id
+ * @ida: ida handle
+ * @staring_id: id to start search at
+ * @p_id: pointer to the allocated handle
+ *
+ * Allocate new ID above or equal to @ida.  It should be called with
+ * any required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call.  If the ida is full, it will
+ * return -ENOSPC.
+ *
+ * @p_id returns a value in the range 0 ... 0x7fffffff.
+ */
+int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+{
+	struct idr_layer *pa[MAX_LEVEL];
+	struct ida_bitmap *bitmap;
+	unsigned long flags;
+	int idr_id = starting_id / IDA_BITMAP_BITS;
+	int offset = starting_id % IDA_BITMAP_BITS;
+	int t, id;
+
+ restart:
+	/* get vacant slot */
+	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+	if (t < 0) {
+		if (t == -1)
+			return -EAGAIN;
+		else /* will be -3 */
+			return -ENOSPC;
+	}
+
+	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+		return -ENOSPC;
+
+	if (t != idr_id)
+		offset = 0;
+	idr_id = t;
+
+	/* if bitmap isn't there, create a new one */
+	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
+	if (!bitmap) {
+		spin_lock_irqsave(&ida->idr.lock, flags);
+		bitmap = ida->free_bitmap;
+		ida->free_bitmap = NULL;
+		spin_unlock_irqrestore(&ida->idr.lock, flags);
+
+		if (!bitmap)
+			return -EAGAIN;
+
+		memset(bitmap, 0, sizeof(struct ida_bitmap));
+		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+		pa[0]->count++;
+	}
+
+	/* lookup for empty slot */
+	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
+	if (t == IDA_BITMAP_BITS) {
+		/* no empty slot after offset, continue to the next chunk */
+		idr_id++;
+		offset = 0;
+		goto restart;
+	}
+
+	id = idr_id * IDA_BITMAP_BITS + t;
+	if (id >= MAX_ID_BIT)
+		return -ENOSPC;
+
+	__set_bit(t, bitmap->bitmap);
+	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
+		idr_mark_full(pa, idr_id);
+
+	*p_id = id;
+
+	/* Each leaf node can handle nearly a thousand slots and the
+	 * whole idea of ida is to have small memory foot print.
+	 * Throw away extra resources one by one after each successful
+	 * allocation.
+	 */
+	if (ida->idr.id_free_cnt || ida->free_bitmap) {
+		struct idr_layer *p = alloc_layer(&ida->idr);
+		if (p)
+			kmem_cache_free(idr_layer_cache, p);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ida_get_new_above);
+
+/**
+ * ida_get_new - allocate new ID
+ * @ida: idr handle
+ * @p_id: pointer to the allocated handle
+ *
+ * Allocate new ID.  It should be called with any required locks.
+ *
+ * If memory is required, it will return -EAGAIN, you should unlock
+ * and go back to the idr_pre_get() call.  If the idr is full, it will
+ * return -ENOSPC.
+ *
+ * @id returns a value in the range 0 ... 0x7fffffff.
+ */
+int ida_get_new(struct ida *ida, int *p_id)
+{
+	return ida_get_new_above(ida, 0, p_id);
+}
+EXPORT_SYMBOL(ida_get_new);
+
+/**
+ * ida_remove - remove the given ID
+ * @ida: ida handle
+ * @id: ID to free
+ */
+void ida_remove(struct ida *ida, int id)
+{
+	struct idr_layer *p = ida->idr.top;
+	int shift = (ida->idr.layers - 1) * IDR_BITS;
+	int idr_id = id / IDA_BITMAP_BITS;
+	int offset = id % IDA_BITMAP_BITS;
+	int n;
+	struct ida_bitmap *bitmap;
+
+	/* clear full bits while looking up the leaf idr_layer */
+	while ((shift > 0) && p) {
+		n = (idr_id >> shift) & IDR_MASK;
+		__clear_bit(n, &p->bitmap);
+		p = p->ary[n];
+		shift -= IDR_BITS;
+	}
+
+	if (p == NULL)
+		goto err;
+
+	n = idr_id & IDR_MASK;
+	__clear_bit(n, &p->bitmap);
+
+	bitmap = (void *)p->ary[n];
+	if (!test_bit(offset, bitmap->bitmap))
+		goto err;
+
+	/* update bitmap and remove it if empty */
+	__clear_bit(offset, bitmap->bitmap);
+	if (--bitmap->nr_busy == 0) {
+		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
+		idr_remove(&ida->idr, idr_id);
+		free_bitmap(ida, bitmap);
+	}
+
+	return;
+
+ err:
+	printk(KERN_WARNING
+	       "ida_remove called for id=%d which is not allocated.\n", id);
+}
+EXPORT_SYMBOL(ida_remove);
+
+/**
+ * ida_destroy - release all cached layers within an ida tree
+ * ida:		ida handle
+ */
+void ida_destroy(struct ida *ida)
+{
+	idr_destroy(&ida->idr);
+	kfree(ida->free_bitmap);
+}
+EXPORT_SYMBOL(ida_destroy);
+
+/**
+ * ida_init - initialize ida handle
+ * @ida: ida handle
+ *
+ * This function is use to set up the handle (@ida) that you will pass
+ * to the rest of the functions.
+ */
+void ida_init(struct ida *ida)
+{
+	memset(ida, 0, sizeof(struct ida));
+	idr_init(&ida->idr);
+
+}
+EXPORT_SYMBOL(ida_init);
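
For orientation only (not part of the patch): a minimal sketch of how the IDA interface added above is intended to be used, following the preallocate/lock/allocate pattern described in the ida_pre_get() and ida_get_new_above() comments. DEFINE_IDA() is assumed to come from the matching include/linux/idr.h change, which is outside this 'lib'-limited diff; my_ida, my_lock and the two helpers are illustrative names, not from the commit.

/* Illustrative sketch -- not part of this commit. */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDA(my_ida);		/* assumed helper from include/linux/idr.h */
static DEFINE_SPINLOCK(my_lock);	/* ida calls rely on caller-provided locking */

static int my_alloc_id(void)
{
	int id, ret;

 again:
	/* Preallocate outside the lock; may sleep with GFP_KERNEL. */
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&my_lock);
	ret = ida_get_new(&my_ida, &id);
	spin_unlock(&my_lock);

	if (ret == -EAGAIN)
		goto again;	/* preallocated memory was consumed; refill and retry */
	if (ret)
		return ret;	/* -ENOSPC: ID space exhausted */

	return id;
}

static void my_free_id(int id)
{
	spin_lock(&my_lock);
	ida_remove(&my_ida, id);
	spin_unlock(&my_lock);
}

ida_destroy() would then be called once the allocator itself is no longer needed, releasing the cached layers and any preallocated bitmap.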
diff --git a/lib/kobject.c b/lib/kobject.c
index ac1520651b9b..4b08e0ff95c8 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -44,7 +44,7 @@ static int populate_dir(struct kobject * kobj)
 	return error;
 }
 
-static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
+static int create_dir(struct kobject *kobj, struct sysfs_dirent *shadow_parent)
 {
 	int error = 0;
 	if (kobject_name(kobj)) {
@@ -162,7 +162,7 @@ static void unlink(struct kobject * kobj)
  * @shadow_parent: sysfs directory to add to.
  */
 
-int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
+int kobject_shadow_add(struct kobject *kobj, struct sysfs_dirent *shadow_parent)
 {
 	int error = 0;
 	struct kobject * parent;
@@ -338,7 +338,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
 	/* Note : if we want to send the new name alone, not the full path,
 	 * we could probably use kobject_name(kobj); */
 
-	error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
+	error = sysfs_rename_dir(kobj, kobj->parent->sd, new_name);
 
 	/* This function is mostly/only used for network interface.
 	 * Some hotplug package track interfaces by their name and
@@ -361,8 +361,8 @@ out:
  * @new_name: object's new name
  */
 
-int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
-			  const char *new_name)
+int kobject_shadow_rename(struct kobject *kobj,
+			  struct sysfs_dirent *new_parent, const char *new_name)
 {
 	int error = 0;
 
@@ -597,10 +597,17 @@ int kset_add(struct kset * k)
 
 int kset_register(struct kset * k)
 {
+	int err;
+
 	if (!k)
 		return -EINVAL;
+
 	kset_init(k);
-	return kset_add(k);
+	err = kset_add(k);
+	if (err)
+		return err;
+	kobject_uevent(&k->kobj, KOBJ_ADD);
+	return 0;
 }
 
 
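
A brief illustrative note on the kset_register() change above: the KOBJ_ADD uevent is now emitted by kset_register() itself once kset_add() succeeds, so a caller only has to check the return value rather than announcing the object separately. The kset, its name, and the init function below are hypothetical, with error handling reduced to the minimum.

/* Illustrative sketch -- not part of this commit. */
#include <linux/kobject.h>
#include <linux/init.h>

static struct kset my_kset;	/* hypothetical kset, assumed to be otherwise set up */

static int __init my_subsys_init(void)
{
	int err;

	kobject_set_name(&my_kset.kobj, "my_kset");
	err = kset_register(&my_kset);	/* emits the KOBJ_ADD uevent on success */
	if (err)
		return err;
	return 0;
}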