Diffstat (limited to 'lib')

 lib/Kconfig                     |   9
 lib/Kconfig.debug               |  21
 lib/Makefile                    |   3
 lib/bitmap.c                    | 174
 lib/find_next_bit.c             |  77
 lib/kernel_lock.c               |   2
 lib/kobject.c                   |  19
 lib/lmb.c                       | 428
 lib/radix-tree.c                |   9
 lib/reed_solomon/reed_solomon.c |   1
 10 files changed, 715 insertions(+), 28 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index ba3d104994d9..8cc8e8722a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
7 | config BITREVERSE | 7 | config BITREVERSE |
8 | tristate | 8 | tristate |
9 | 9 | ||
10 | config GENERIC_FIND_FIRST_BIT | ||
11 | def_bool n | ||
12 | |||
13 | config GENERIC_FIND_NEXT_BIT | ||
14 | def_bool n | ||
15 | |||
10 | config CRC_CCITT | 16 | config CRC_CCITT |
11 | tristate "CRC-CCITT functions" | 17 | tristate "CRC-CCITT functions" |
12 | help | 18 | help |
@@ -141,4 +147,7 @@ config HAS_DMA
141 | config CHECK_SIGNATURE | 147 | config CHECK_SIGNATURE |
142 | bool | 148 | bool |
143 | 149 | ||
150 | config HAVE_LMB | ||
151 | boolean | ||
152 | |||
144 | endmenu | 153 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 95de3102bc87..754cc0027f2a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -25,6 +25,17 @@ config ENABLE_MUST_CHECK
25 | suppress the "warning: ignoring return value of 'foo', declared with | 25 | suppress the "warning: ignoring return value of 'foo', declared with |
26 | attribute warn_unused_result" messages. | 26 | attribute warn_unused_result" messages. |
27 | 27 | ||
28 | config FRAME_WARN | ||
29 | int "Warn for stack frames larger than (needs gcc 4.4)" | ||
30 | range 0 8192 | ||
31 | default 1024 if !64BIT | ||
32 | default 2048 if 64BIT | ||
33 | help | ||
34 | Tell gcc to warn at build time for stack frames larger than this. | ||
35 | Setting this too low will cause a lot of warnings. | ||
36 | Setting it to 0 disables the warning. | ||
37 | Requires gcc 4.4 | ||
38 | |||
28 | config MAGIC_SYSRQ | 39 | config MAGIC_SYSRQ |
29 | bool "Magic SysRq key" | 40 | bool "Magic SysRq key" |
30 | depends on !UML | 41 | depends on !UML |
@@ -427,6 +438,16 @@ config DEBUG_VM
427 | 438 | ||
428 | If unsure, say N. | 439 | If unsure, say N. |
429 | 440 | ||
441 | config DEBUG_WRITECOUNT | ||
442 | bool "Debug filesystem writers count" | ||
443 | depends on DEBUG_KERNEL | ||
444 | help | ||
445 | Enable this to catch wrong use of the writers count in struct | ||
446 | vfsmount. This will increase the size of each file struct by | ||
447 | 32 bits. | ||
448 | |||
449 | If unsure, say N. | ||
450 | |||
430 | config DEBUG_LIST | 451 | config DEBUG_LIST |
431 | bool "Debug linked list manipulation" | 452 | bool "Debug linked list manipulation" |
432 | depends on DEBUG_KERNEL | 453 | depends on DEBUG_KERNEL |
diff --git a/lib/Makefile b/lib/Makefile
index 4d7649c326f6..2d7001b7f5a4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
29 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | 29 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o |
30 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 30 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
31 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 31 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
32 | lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o | ||
32 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | 33 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o |
33 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 34 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
34 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 35 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
@@ -68,6 +69,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
68 | 69 | ||
69 | lib-$(CONFIG_GENERIC_BUG) += bug.o | 70 | lib-$(CONFIG_GENERIC_BUG) += bug.o |
70 | 71 | ||
72 | obj-$(CONFIG_HAVE_LMB) += lmb.o | ||
73 | |||
71 | hostprogs-y := gen_crc32table | 74 | hostprogs-y := gen_crc32table |
72 | clean-files := crc32table.h | 75 | clean-files := crc32table.h |
73 | 76 | ||
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2c9242e3fed0..c4cb48f77f0c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,22 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
316 | EXPORT_SYMBOL(bitmap_scnprintf); | 316 | EXPORT_SYMBOL(bitmap_scnprintf); |
317 | 317 | ||
318 | /** | 318 | /** |
319 | * bitmap_scnprintf_len - return buffer length needed to convert | ||
320 | * bitmap to an ASCII hex string. | ||
321 | * @len: number of bits to be converted | ||
322 | */ | ||
323 | int bitmap_scnprintf_len(unsigned int len) | ||
324 | { | ||
325 | /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */ | ||
326 | int bitslen = ALIGN(len, CHUNKSZ); | ||
327 | int wordlen = CHUNKSZ / 4; | ||
328 | int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char); | ||
329 | |||
330 | return buflen; | ||
331 | } | ||
332 | EXPORT_SYMBOL(bitmap_scnprintf_len); | ||
333 | |||
334 | /** | ||
319 | * __bitmap_parse - convert an ASCII hex string into a bitmap. | 335 | * __bitmap_parse - convert an ASCII hex string into a bitmap. |
320 | * @buf: pointer to buffer containing string. | 336 | * @buf: pointer to buffer containing string. |
321 | * @buflen: buffer size in bytes. If string is smaller than this | 337 | * @buflen: buffer size in bytes. If string is smaller than this |
@@ -698,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
698 | } | 714 | } |
699 | EXPORT_SYMBOL(bitmap_bitremap); | 715 | EXPORT_SYMBOL(bitmap_bitremap); |
700 | 716 | ||
717 | /** | ||
718 | * bitmap_onto - translate one bitmap relative to another | ||
719 | * @dst: resulting translated bitmap | ||
720 | * @orig: original untranslated bitmap | ||
721 | * @relmap: bitmap relative to which translated | ||
722 | * @bits: number of bits in each of these bitmaps | ||
723 | * | ||
724 | * Set the n-th bit of @dst iff there exists some m such that the | ||
725 | * n-th bit of @relmap is set, the m-th bit of @orig is set, and | ||
726 | * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. | ||
727 | * (If you understood the previous sentence the first time you | ||
728 | * read it, you're overqualified for your current job.) | ||
729 | * | ||
730 | * In other words, @orig is mapped onto (surjectively) @dst, | ||
731 | * using the map { <n, m> | the n-th bit of @relmap is the | ||
732 | * m-th set bit of @relmap }. | ||
733 | * | ||
734 | * Any set bits in @orig above bit number W, where W is the | ||
735 | * weight of (number of set bits in) @relmap, are mapped nowhere. | ||
736 | * In particular, if for all bits m set in @orig, m >= W, then | ||
737 | * @dst will end up empty. In situations where the possibility | ||
738 | * of such an empty result is not desired, one way to avoid it is | ||
739 | * to use the bitmap_fold() operator, below, to first fold the | ||
740 | * @orig bitmap over itself so that all its set bits x are in the | ||
741 | * range 0 <= x < W. The bitmap_fold() operator does this by | ||
742 | * setting the bit (m % W) in @dst, for each bit (m) set in @orig. | ||
743 | * | ||
744 | * Example [1] for bitmap_onto(): | ||
745 | * Let's say @relmap has bits 30-39 set, and @orig has bits | ||
746 | * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, | ||
747 | * @dst will have bits 31, 33, 35, 37 and 39 set. | ||
748 | * | ||
749 | * When bit 0 is set in @orig, it means turn on the bit in | ||
750 | * @dst corresponding to whatever is the first bit (if any) | ||
751 | * that is turned on in @relmap. Since bit 0 was off in the | ||
752 | * above example, we leave off that bit (bit 30) in @dst. | ||
753 | * | ||
754 | * When bit 1 is set in @orig (as in the above example), it | ||
755 | * means turn on the bit in @dst corresponding to whatever | ||
756 | * is the second bit that is turned on in @relmap. The second | ||
757 | * bit in @relmap that was turned on in the above example was | ||
758 | * bit 31, so we turned on bit 31 in @dst. | ||
759 | * | ||
760 | * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, | ||
761 | * because they were the 4th, 6th, 8th and 10th set bits | ||
762 | * set in @relmap, and the 4th, 6th, 8th and 10th bits of | ||
763 | * @orig (i.e. bits 3, 5, 7 and 9) were also set. | ||
764 | * | ||
765 | * When bit 11 is set in @orig, it means turn on the bit in | ||
766 | * @dst corresponding to whatever is the twelfth bit that is | ||
767 | * turned on in @relmap. In the above example, there were | ||
768 | * only ten bits turned on in @relmap (30..39), so the fact that | ||
769 | * bit 11 was set in @orig had no effect on @dst. | ||
770 | * | ||
771 | * Example [2] for bitmap_fold() + bitmap_onto(): | ||
772 | * Let's say @relmap has these ten bits set: | ||
773 | * 40 41 42 43 45 48 53 61 74 95 | ||
774 | * (for the curious, that's 40 plus the first ten terms of the | ||
775 | * Fibonacci sequence.) | ||
776 | * | ||
777 | * Further, let's say we use the following code, invoking | ||
778 | * bitmap_fold() then bitmap_onto(), as suggested above to | ||
779 | * avoid the possibility of an empty @dst result: | ||
780 | * | ||
781 | * unsigned long *tmp; // a temporary bitmap's bits | ||
782 | * | ||
783 | * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); | ||
784 | * bitmap_onto(dst, tmp, relmap, bits); | ||
785 | * | ||
786 | * Then this table shows what various values of @dst would be, for | ||
787 | * various @orig's. I list the zero-based positions of each set bit. | ||
788 | * The tmp column shows the intermediate result, as computed by | ||
789 | * using bitmap_fold() to fold the @orig bitmap modulo ten | ||
790 | * (the weight of @relmap). | ||
791 | * | ||
792 | * @orig tmp @dst | ||
793 | * 0 0 40 | ||
794 | * 1 1 41 | ||
795 | * 9 9 95 | ||
796 | * 10 0 40 (*) | ||
797 | * 1 3 5 7 1 3 5 7 41 43 48 61 | ||
798 | * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 | ||
799 | * 0 9 18 27 0 9 8 7 40 61 74 95 | ||
800 | * 0 10 20 30 0 40 | ||
801 | * 0 11 22 33 0 1 2 3 40 41 42 43 | ||
802 | * 0 12 24 36 0 2 4 6 40 42 45 53 | ||
803 | * 78 102 211 1 2 8 41 42 74 (*) | ||
804 | * | ||
805 | * (*) For these marked lines, if we hadn't first done bitmap_fold() | ||
806 | * into tmp, then the @dst result would have been empty. | ||
807 | * | ||
808 | * If either of @orig or @relmap is empty (no set bits), then @dst | ||
809 | * will be returned empty. | ||
810 | * | ||
811 | * If (as explained above) the only set bits in @orig are in positions | ||
812 | * m where m >= W, (where W is the weight of @relmap) then @dst will | ||
813 | * once again be returned empty. | ||
814 | * | ||
815 | * All bits in @dst not set by the above rule are cleared. | ||
816 | */ | ||
817 | void bitmap_onto(unsigned long *dst, const unsigned long *orig, | ||
818 | const unsigned long *relmap, int bits) | ||
819 | { | ||
820 | int n, m; /* same meaning as in above comment */ | ||
821 | |||
822 | if (dst == orig) /* following doesn't handle inplace mappings */ | ||
823 | return; | ||
824 | bitmap_zero(dst, bits); | ||
825 | |||
826 | /* | ||
827 | * The following code is a more efficient, but less | ||
828 | * obvious, equivalent to the loop: | ||
829 | * for (m = 0; m < bitmap_weight(relmap, bits); m++) { | ||
830 | * n = bitmap_ord_to_pos(orig, m, bits); | ||
831 | * if (test_bit(m, orig)) | ||
832 | * set_bit(n, dst); | ||
833 | * } | ||
834 | */ | ||
835 | |||
836 | m = 0; | ||
837 | for (n = find_first_bit(relmap, bits); | ||
838 | n < bits; | ||
839 | n = find_next_bit(relmap, bits, n + 1)) { | ||
840 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ | ||
841 | if (test_bit(m, orig)) | ||
842 | set_bit(n, dst); | ||
843 | m++; | ||
844 | } | ||
845 | } | ||
846 | EXPORT_SYMBOL(bitmap_onto); | ||
847 | |||
848 | /** | ||
849 | * bitmap_fold - fold larger bitmap into smaller, modulo specified size | ||
850 | * @dst: resulting smaller bitmap | ||
851 | * @orig: original larger bitmap | ||
852 | * @sz: specified size | ||
853 | * @bits: number of bits in each of these bitmaps | ||
854 | * | ||
855 | * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. | ||
856 | * Clear all other bits in @dst. See further the comment and | ||
857 | * Example [2] for bitmap_onto() for why and how to use this. | ||
858 | */ | ||
859 | void bitmap_fold(unsigned long *dst, const unsigned long *orig, | ||
860 | int sz, int bits) | ||
861 | { | ||
862 | int oldbit; | ||
863 | |||
864 | if (dst == orig) /* following doesn't handle inplace mappings */ | ||
865 | return; | ||
866 | bitmap_zero(dst, bits); | ||
867 | |||
868 | for (oldbit = find_first_bit(orig, bits); | ||
869 | oldbit < bits; | ||
870 | oldbit = find_next_bit(orig, bits, oldbit + 1)) | ||
871 | set_bit(oldbit % sz, dst); | ||
872 | } | ||
873 | EXPORT_SYMBOL(bitmap_fold); | ||
874 | |||
701 | /* | 875 | /* |
702 | * Common code for bitmap_*_region() routines. | 876 | * Common code for bitmap_*_region() routines. |
703 | * bitmap: array of unsigned longs corresponding to the bitmap | 877 | * bitmap: array of unsigned longs corresponding to the bitmap |
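
For illustration, here is a minimal sketch (not part of the patch; NBITS and the function name are hypothetical) of how bitmap_fold() and bitmap_onto() are meant to be used together, along the lines of Example [2] above:

#include <linux/bitmap.h>

#define NBITS 128

/* Remap 'orig' onto the positions selected by 'relmap'. */
static void example_remap(unsigned long *dst, const unsigned long *orig,
			  const unsigned long *relmap)
{
	DECLARE_BITMAP(tmp, NBITS);
	int w = bitmap_weight(relmap, NBITS);

	if (!w) {			/* empty relmap: result is empty too */
		bitmap_zero(dst, NBITS);
		return;
	}

	/* Fold orig into [0, w) so no set bit is mapped nowhere... */
	bitmap_fold(tmp, orig, w, NBITS);
	/* ...then bit m of tmp turns on the m-th set bit of relmap in dst. */
	bitmap_onto(dst, tmp, relmap, NBITS);
}
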
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a8841..d3f5784807b4 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@
16 | 16 | ||
17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | 17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
18 | 18 | ||
19 | /** | 19 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT |
20 | * find_next_bit - find the next set bit in a memory region | 20 | /* |
21 | * @addr: The address to base the search on | 21 | * Find the next set bit in a memory region. |
22 | * @offset: The bitnumber to start searching at | ||
23 | * @size: The maximum size to search | ||
24 | */ | 22 | */ |
25 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | 23 | unsigned long __find_next_bit(const unsigned long *addr, |
26 | unsigned long offset) | 24 | unsigned long size, unsigned long offset) |
27 | { | 25 | { |
28 | const unsigned long *p = addr + BITOP_WORD(offset); | 26 | const unsigned long *p = addr + BITOP_WORD(offset); |
29 | unsigned long result = offset & ~(BITS_PER_LONG-1); | 27 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
@@ -60,15 +58,14 @@ found_first:
60 | found_middle: | 58 | found_middle: |
61 | return result + __ffs(tmp); | 59 | return result + __ffs(tmp); |
62 | } | 60 | } |
63 | 61 | EXPORT_SYMBOL(__find_next_bit); | |
64 | EXPORT_SYMBOL(find_next_bit); | ||
65 | 62 | ||
66 | /* | 63 | /* |
67 | * This implementation of find_{first,next}_zero_bit was stolen from | 64 | * This implementation of find_{first,next}_zero_bit was stolen from |
68 | * Linus' asm-alpha/bitops.h. | 65 | * Linus' asm-alpha/bitops.h. |
69 | */ | 66 | */ |
70 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | 67 | unsigned long __find_next_zero_bit(const unsigned long *addr, |
71 | unsigned long offset) | 68 | unsigned long size, unsigned long offset) |
72 | { | 69 | { |
73 | const unsigned long *p = addr + BITOP_WORD(offset); | 70 | const unsigned long *p = addr + BITOP_WORD(offset); |
74 | unsigned long result = offset & ~(BITS_PER_LONG-1); | 71 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
@@ -105,8 +102,64 @@ found_first:
105 | found_middle: | 102 | found_middle: |
106 | return result + ffz(tmp); | 103 | return result + ffz(tmp); |
107 | } | 104 | } |
105 | EXPORT_SYMBOL(__find_next_zero_bit); | ||
106 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | ||
107 | |||
108 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | ||
109 | /* | ||
110 | * Find the first set bit in a memory region. | ||
111 | */ | ||
112 | unsigned long __find_first_bit(const unsigned long *addr, | ||
113 | unsigned long size) | ||
114 | { | ||
115 | const unsigned long *p = addr; | ||
116 | unsigned long result = 0; | ||
117 | unsigned long tmp; | ||
108 | 118 | ||
109 | EXPORT_SYMBOL(find_next_zero_bit); | 119 | while (size & ~(BITS_PER_LONG-1)) { |
120 | if ((tmp = *(p++))) | ||
121 | goto found; | ||
122 | result += BITS_PER_LONG; | ||
123 | size -= BITS_PER_LONG; | ||
124 | } | ||
125 | if (!size) | ||
126 | return result; | ||
127 | |||
128 | tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); | ||
129 | if (tmp == 0UL) /* Are any bits set? */ | ||
130 | return result + size; /* Nope. */ | ||
131 | found: | ||
132 | return result + __ffs(tmp); | ||
133 | } | ||
134 | EXPORT_SYMBOL(__find_first_bit); | ||
135 | |||
136 | /* | ||
137 | * Find the first cleared bit in a memory region. | ||
138 | */ | ||
139 | unsigned long __find_first_zero_bit(const unsigned long *addr, | ||
140 | unsigned long size) | ||
141 | { | ||
142 | const unsigned long *p = addr; | ||
143 | unsigned long result = 0; | ||
144 | unsigned long tmp; | ||
145 | |||
146 | while (size & ~(BITS_PER_LONG-1)) { | ||
147 | if (~(tmp = *(p++))) | ||
148 | goto found; | ||
149 | result += BITS_PER_LONG; | ||
150 | size -= BITS_PER_LONG; | ||
151 | } | ||
152 | if (!size) | ||
153 | return result; | ||
154 | |||
155 | tmp = (*p) | (~0UL << size); | ||
156 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
157 | return result + size; /* Nope. */ | ||
158 | found: | ||
159 | return result + ffz(tmp); | ||
160 | } | ||
161 | EXPORT_SYMBOL(__find_first_zero_bit); | ||
162 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | ||
110 | 163 | ||
111 | #ifdef __BIG_ENDIAN | 164 | #ifdef __BIG_ENDIAN |
112 | 165 | ||
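
These out-of-line routines are the generic implementations behind find_first_bit()/find_next_bit() on architectures that enable the new CONFIG_GENERIC_FIND_FIRST_BIT / CONFIG_GENERIC_FIND_NEXT_BIT symbols. A sketch of the usual caller-side pattern they serve (illustrative only; the function and variable names are made up):

/* Walk every set bit in a bitmap of 'nbits' bits. */
static unsigned long count_set_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit, count = 0;

	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		count++;

	return count;	/* find_*_bit() return 'nbits' when nothing is found */
}
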
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index fbc11a336bc5..cd3e82530b03 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,7 +8,7 @@
8 | #include <linux/smp_lock.h> | 8 | #include <linux/smp_lock.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kallsyms.h> | 10 | #include <linux/kallsyms.h> |
11 | #include <asm/semaphore.h> | 11 | #include <linux/semaphore.h> |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * The 'big kernel semaphore' | 14 | * The 'big kernel semaphore' |
diff --git a/lib/kobject.c b/lib/kobject.c
index 0d03252f87a8..2c6490370922 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -58,11 +58,6 @@ static int create_dir(struct kobject *kobj)
58 | return error; | 58 | return error; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline struct kobject *to_kobj(struct list_head *entry) | ||
62 | { | ||
63 | return container_of(entry, struct kobject, entry); | ||
64 | } | ||
65 | |||
66 | static int get_kobj_path_length(struct kobject *kobj) | 61 | static int get_kobj_path_length(struct kobject *kobj) |
67 | { | 62 | { |
68 | int length = 1; | 63 | int length = 1; |
@@ -592,8 +587,15 @@ static void kobject_release(struct kref *kref)
592 | */ | 587 | */ |
593 | void kobject_put(struct kobject *kobj) | 588 | void kobject_put(struct kobject *kobj) |
594 | { | 589 | { |
595 | if (kobj) | 590 | if (kobj) { |
591 | if (!kobj->state_initialized) { | ||
592 | printk(KERN_WARNING "kobject: '%s' (%p): is not " | ||
593 | "initialized, yet kobject_put() is being " | ||
594 | "called.\n", kobject_name(kobj), kobj); | ||
595 | WARN_ON(1); | ||
596 | } | ||
596 | kref_put(&kobj->kref, kobject_release); | 597 | kref_put(&kobj->kref, kobject_release); |
598 | } | ||
597 | } | 599 | } |
598 | 600 | ||
599 | static void dynamic_kobj_release(struct kobject *kobj) | 601 | static void dynamic_kobj_release(struct kobject *kobj) |
@@ -745,12 +747,11 @@ void kset_unregister(struct kset *k)
745 | */ | 747 | */ |
746 | struct kobject *kset_find_obj(struct kset *kset, const char *name) | 748 | struct kobject *kset_find_obj(struct kset *kset, const char *name) |
747 | { | 749 | { |
748 | struct list_head *entry; | 750 | struct kobject *k; |
749 | struct kobject *ret = NULL; | 751 | struct kobject *ret = NULL; |
750 | 752 | ||
751 | spin_lock(&kset->list_lock); | 753 | spin_lock(&kset->list_lock); |
752 | list_for_each(entry, &kset->list) { | 754 | list_for_each_entry(k, &kset->list, entry) { |
753 | struct kobject *k = to_kobj(entry); | ||
754 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { | 755 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { |
755 | ret = kobject_get(k); | 756 | ret = kobject_get(k); |
756 | break; | 757 | break; |
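
For context, a sketch of a typical kset_find_obj() caller (illustrative only; the kset pointer and the "example" name are hypothetical), showing the reference counting the converted lookup relies on:

static int example_lookup(struct kset *kset)
{
	struct kobject *kobj;

	kobj = kset_find_obj(kset, "example");	/* takes a reference on a match */
	if (!kobj)
		return -ENOENT;

	/* ... use kobj ... */

	kobject_put(kobj);			/* drop the reference from the lookup */
	return 0;
}
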
diff --git a/lib/lmb.c b/lib/lmb.c
new file mode 100644
index 000000000000..207147ab25e4
--- /dev/null
+++ b/lib/lmb.c
@@ -0,0 +1,428 @@
1 | /* | ||
2 | * Procedures for maintaining information about logical memory blocks. | ||
3 | * | ||
4 | * Peter Bergner, IBM Corp. June 2001. | ||
5 | * Copyright (C) 2001 Peter Bergner. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/bitops.h> | ||
16 | #include <linux/lmb.h> | ||
17 | |||
18 | #define LMB_ALLOC_ANYWHERE 0 | ||
19 | |||
20 | struct lmb lmb; | ||
21 | |||
22 | void lmb_dump_all(void) | ||
23 | { | ||
24 | #ifdef DEBUG | ||
25 | unsigned long i; | ||
26 | |||
27 | pr_debug("lmb_dump_all:\n"); | ||
28 | pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt); | ||
29 | pr_debug(" memory.size = 0x%llx\n", | ||
30 | (unsigned long long)lmb.memory.size); | ||
31 | for (i=0; i < lmb.memory.cnt ;i++) { | ||
32 | pr_debug(" memory.region[0x%x].base = 0x%llx\n", | ||
33 | i, (unsigned long long)lmb.memory.region[i].base); | ||
34 | pr_debug(" .size = 0x%llx\n", | ||
35 | (unsigned long long)lmb.memory.region[i].size); | ||
36 | } | ||
37 | |||
38 | pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); | ||
39 | pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size); | ||
40 | for (i=0; i < lmb.reserved.cnt ;i++) { | ||
41 | pr_debug(" reserved.region[0x%x].base = 0x%llx\n", | ||
42 | i, (unsigned long long)lmb.reserved.region[i].base); | ||
43 | pr_debug(" .size = 0x%llx\n", | ||
44 | (unsigned long long)lmb.reserved.region[i].size); | ||
45 | } | ||
46 | #endif /* DEBUG */ | ||
47 | } | ||
48 | |||
49 | static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1, | ||
50 | u64 base2, u64 size2) | ||
51 | { | ||
52 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | ||
53 | } | ||
54 | |||
55 | static long __init lmb_addrs_adjacent(u64 base1, u64 size1, | ||
56 | u64 base2, u64 size2) | ||
57 | { | ||
58 | if (base2 == base1 + size1) | ||
59 | return 1; | ||
60 | else if (base1 == base2 + size2) | ||
61 | return -1; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static long __init lmb_regions_adjacent(struct lmb_region *rgn, | ||
67 | unsigned long r1, unsigned long r2) | ||
68 | { | ||
69 | u64 base1 = rgn->region[r1].base; | ||
70 | u64 size1 = rgn->region[r1].size; | ||
71 | u64 base2 = rgn->region[r2].base; | ||
72 | u64 size2 = rgn->region[r2].size; | ||
73 | |||
74 | return lmb_addrs_adjacent(base1, size1, base2, size2); | ||
75 | } | ||
76 | |||
77 | static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) | ||
78 | { | ||
79 | unsigned long i; | ||
80 | |||
81 | for (i = r; i < rgn->cnt - 1; i++) { | ||
82 | rgn->region[i].base = rgn->region[i + 1].base; | ||
83 | rgn->region[i].size = rgn->region[i + 1].size; | ||
84 | } | ||
85 | rgn->cnt--; | ||
86 | } | ||
87 | |||
88 | /* Assumption: base addr of region 1 < base addr of region 2 */ | ||
89 | static void __init lmb_coalesce_regions(struct lmb_region *rgn, | ||
90 | unsigned long r1, unsigned long r2) | ||
91 | { | ||
92 | rgn->region[r1].size += rgn->region[r2].size; | ||
93 | lmb_remove_region(rgn, r2); | ||
94 | } | ||
95 | |||
96 | void __init lmb_init(void) | ||
97 | { | ||
98 | /* Create a dummy zero size LMB which will get coalesced away later. | ||
99 | * This simplifies the lmb_add() code below... | ||
100 | */ | ||
101 | lmb.memory.region[0].base = 0; | ||
102 | lmb.memory.region[0].size = 0; | ||
103 | lmb.memory.cnt = 1; | ||
104 | |||
105 | /* Ditto. */ | ||
106 | lmb.reserved.region[0].base = 0; | ||
107 | lmb.reserved.region[0].size = 0; | ||
108 | lmb.reserved.cnt = 1; | ||
109 | } | ||
110 | |||
111 | void __init lmb_analyze(void) | ||
112 | { | ||
113 | int i; | ||
114 | |||
115 | lmb.memory.size = 0; | ||
116 | |||
117 | for (i = 0; i < lmb.memory.cnt; i++) | ||
118 | lmb.memory.size += lmb.memory.region[i].size; | ||
119 | } | ||
120 | |||
121 | static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) | ||
122 | { | ||
123 | unsigned long coalesced = 0; | ||
124 | long adjacent, i; | ||
125 | |||
126 | if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { | ||
127 | rgn->region[0].base = base; | ||
128 | rgn->region[0].size = size; | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | /* First try and coalesce this LMB with another. */ | ||
133 | for (i = 0; i < rgn->cnt; i++) { | ||
134 | u64 rgnbase = rgn->region[i].base; | ||
135 | u64 rgnsize = rgn->region[i].size; | ||
136 | |||
137 | if ((rgnbase == base) && (rgnsize == size)) | ||
138 | /* Already have this region, so we're done */ | ||
139 | return 0; | ||
140 | |||
141 | adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); | ||
142 | if (adjacent > 0) { | ||
143 | rgn->region[i].base -= size; | ||
144 | rgn->region[i].size += size; | ||
145 | coalesced++; | ||
146 | break; | ||
147 | } else if (adjacent < 0) { | ||
148 | rgn->region[i].size += size; | ||
149 | coalesced++; | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) { | ||
155 | lmb_coalesce_regions(rgn, i, i+1); | ||
156 | coalesced++; | ||
157 | } | ||
158 | |||
159 | if (coalesced) | ||
160 | return coalesced; | ||
161 | if (rgn->cnt >= MAX_LMB_REGIONS) | ||
162 | return -1; | ||
163 | |||
164 | /* Couldn't coalesce the LMB, so add it to the sorted table. */ | ||
165 | for (i = rgn->cnt - 1; i >= 0; i--) { | ||
166 | if (base < rgn->region[i].base) { | ||
167 | rgn->region[i+1].base = rgn->region[i].base; | ||
168 | rgn->region[i+1].size = rgn->region[i].size; | ||
169 | } else { | ||
170 | rgn->region[i+1].base = base; | ||
171 | rgn->region[i+1].size = size; | ||
172 | break; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | if (base < rgn->region[0].base) { | ||
177 | rgn->region[0].base = base; | ||
178 | rgn->region[0].size = size; | ||
179 | } | ||
180 | rgn->cnt++; | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | long __init lmb_add(u64 base, u64 size) | ||
186 | { | ||
187 | struct lmb_region *_rgn = &lmb.memory; | ||
188 | |||
189 | /* On pSeries LPAR systems, the first LMB is our RMO region. */ | ||
190 | if (base == 0) | ||
191 | lmb.rmo_size = size; | ||
192 | |||
193 | return lmb_add_region(_rgn, base, size); | ||
194 | |||
195 | } | ||
196 | |||
197 | long __init lmb_reserve(u64 base, u64 size) | ||
198 | { | ||
199 | struct lmb_region *_rgn = &lmb.reserved; | ||
200 | |||
201 | BUG_ON(0 == size); | ||
202 | |||
203 | return lmb_add_region(_rgn, base, size); | ||
204 | } | ||
205 | |||
206 | long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) | ||
207 | { | ||
208 | unsigned long i; | ||
209 | |||
210 | for (i = 0; i < rgn->cnt; i++) { | ||
211 | u64 rgnbase = rgn->region[i].base; | ||
212 | u64 rgnsize = rgn->region[i].size; | ||
213 | if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) | ||
214 | break; | ||
215 | } | ||
216 | |||
217 | return (i < rgn->cnt) ? i : -1; | ||
218 | } | ||
219 | |||
220 | static u64 lmb_align_down(u64 addr, u64 size) | ||
221 | { | ||
222 | return addr & ~(size - 1); | ||
223 | } | ||
224 | |||
225 | static u64 lmb_align_up(u64 addr, u64 size) | ||
226 | { | ||
227 | return (addr + (size - 1)) & ~(size - 1); | ||
228 | } | ||
229 | |||
230 | static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end, | ||
231 | u64 size, u64 align) | ||
232 | { | ||
233 | u64 base, res_base; | ||
234 | long j; | ||
235 | |||
236 | base = lmb_align_down((end - size), align); | ||
237 | while (start <= base) { | ||
238 | j = lmb_overlaps_region(&lmb.reserved, base, size); | ||
239 | if (j < 0) { | ||
240 | /* this area isn't reserved, take it */ | ||
241 | if (lmb_add_region(&lmb.reserved, base, | ||
242 | lmb_align_up(size, align)) < 0) | ||
243 | base = ~(u64)0; | ||
244 | return base; | ||
245 | } | ||
246 | res_base = lmb.reserved.region[j].base; | ||
247 | if (res_base < size) | ||
248 | break; | ||
249 | base = lmb_align_down(res_base - size, align); | ||
250 | } | ||
251 | |||
252 | return ~(u64)0; | ||
253 | } | ||
254 | |||
255 | static u64 __init lmb_alloc_nid_region(struct lmb_property *mp, | ||
256 | u64 (*nid_range)(u64, u64, int *), | ||
257 | u64 size, u64 align, int nid) | ||
258 | { | ||
259 | u64 start, end; | ||
260 | |||
261 | start = mp->base; | ||
262 | end = start + mp->size; | ||
263 | |||
264 | start = lmb_align_up(start, align); | ||
265 | while (start < end) { | ||
266 | u64 this_end; | ||
267 | int this_nid; | ||
268 | |||
269 | this_end = nid_range(start, end, &this_nid); | ||
270 | if (this_nid == nid) { | ||
271 | u64 ret = lmb_alloc_nid_unreserved(start, this_end, | ||
272 | size, align); | ||
273 | if (ret != ~(u64)0) | ||
274 | return ret; | ||
275 | } | ||
276 | start = this_end; | ||
277 | } | ||
278 | |||
279 | return ~(u64)0; | ||
280 | } | ||
281 | |||
282 | u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, | ||
283 | u64 (*nid_range)(u64 start, u64 end, int *nid)) | ||
284 | { | ||
285 | struct lmb_region *mem = &lmb.memory; | ||
286 | int i; | ||
287 | |||
288 | for (i = 0; i < mem->cnt; i++) { | ||
289 | u64 ret = lmb_alloc_nid_region(&mem->region[i], | ||
290 | nid_range, | ||
291 | size, align, nid); | ||
292 | if (ret != ~(u64)0) | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | return lmb_alloc(size, align); | ||
297 | } | ||
298 | |||
299 | u64 __init lmb_alloc(u64 size, u64 align) | ||
300 | { | ||
301 | return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); | ||
302 | } | ||
303 | |||
304 | u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr) | ||
305 | { | ||
306 | u64 alloc; | ||
307 | |||
308 | alloc = __lmb_alloc_base(size, align, max_addr); | ||
309 | |||
310 | if (alloc == 0) | ||
311 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | ||
312 | (unsigned long long) size, (unsigned long long) max_addr); | ||
313 | |||
314 | return alloc; | ||
315 | } | ||
316 | |||
317 | u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) | ||
318 | { | ||
319 | long i, j; | ||
320 | u64 base = 0; | ||
321 | u64 res_base; | ||
322 | |||
323 | BUG_ON(0 == size); | ||
324 | |||
325 | /* On some platforms, make sure we allocate lowmem */ | ||
326 | /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ | ||
327 | if (max_addr == LMB_ALLOC_ANYWHERE) | ||
328 | max_addr = LMB_REAL_LIMIT; | ||
329 | |||
330 | for (i = lmb.memory.cnt - 1; i >= 0; i--) { | ||
331 | u64 lmbbase = lmb.memory.region[i].base; | ||
332 | u64 lmbsize = lmb.memory.region[i].size; | ||
333 | |||
334 | if (lmbsize < size) | ||
335 | continue; | ||
336 | if (max_addr == LMB_ALLOC_ANYWHERE) | ||
337 | base = lmb_align_down(lmbbase + lmbsize - size, align); | ||
338 | else if (lmbbase < max_addr) { | ||
339 | base = min(lmbbase + lmbsize, max_addr); | ||
340 | base = lmb_align_down(base - size, align); | ||
341 | } else | ||
342 | continue; | ||
343 | |||
344 | while (base && lmbbase <= base) { | ||
345 | j = lmb_overlaps_region(&lmb.reserved, base, size); | ||
346 | if (j < 0) { | ||
347 | /* this area isn't reserved, take it */ | ||
348 | if (lmb_add_region(&lmb.reserved, base, | ||
349 | lmb_align_up(size, align)) < 0) | ||
350 | return 0; | ||
351 | return base; | ||
352 | } | ||
353 | res_base = lmb.reserved.region[j].base; | ||
354 | if (res_base < size) | ||
355 | break; | ||
356 | base = lmb_align_down(res_base - size, align); | ||
357 | } | ||
358 | } | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | /* You must call lmb_analyze() before this. */ | ||
363 | u64 __init lmb_phys_mem_size(void) | ||
364 | { | ||
365 | return lmb.memory.size; | ||
366 | } | ||
367 | |||
368 | u64 __init lmb_end_of_DRAM(void) | ||
369 | { | ||
370 | int idx = lmb.memory.cnt - 1; | ||
371 | |||
372 | return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); | ||
373 | } | ||
374 | |||
375 | /* You must call lmb_analyze() after this. */ | ||
376 | void __init lmb_enforce_memory_limit(u64 memory_limit) | ||
377 | { | ||
378 | unsigned long i; | ||
379 | u64 limit; | ||
380 | struct lmb_property *p; | ||
381 | |||
382 | if (!memory_limit) | ||
383 | return; | ||
384 | |||
385 | /* Truncate the lmb regions to satisfy the memory limit. */ | ||
386 | limit = memory_limit; | ||
387 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
388 | if (limit > lmb.memory.region[i].size) { | ||
389 | limit -= lmb.memory.region[i].size; | ||
390 | continue; | ||
391 | } | ||
392 | |||
393 | lmb.memory.region[i].size = limit; | ||
394 | lmb.memory.cnt = i + 1; | ||
395 | break; | ||
396 | } | ||
397 | |||
398 | if (lmb.memory.region[0].size < lmb.rmo_size) | ||
399 | lmb.rmo_size = lmb.memory.region[0].size; | ||
400 | |||
401 | /* And truncate any reserves above the limit also. */ | ||
402 | for (i = 0; i < lmb.reserved.cnt; i++) { | ||
403 | p = &lmb.reserved.region[i]; | ||
404 | |||
405 | if (p->base > memory_limit) | ||
406 | p->size = 0; | ||
407 | else if ((p->base + p->size) > memory_limit) | ||
408 | p->size = memory_limit - p->base; | ||
409 | |||
410 | if (p->size == 0) { | ||
411 | lmb_remove_region(&lmb.reserved, i); | ||
412 | i--; | ||
413 | } | ||
414 | } | ||
415 | } | ||
416 | |||
417 | int __init lmb_is_reserved(u64 addr) | ||
418 | { | ||
419 | int i; | ||
420 | |||
421 | for (i = 0; i < lmb.reserved.cnt; i++) { | ||
422 | u64 upper = lmb.reserved.region[i].base + | ||
423 | lmb.reserved.region[i].size - 1; | ||
424 | if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) | ||
425 | return 1; | ||
426 | } | ||
427 | return 0; | ||
428 | } | ||
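
To show how the pieces above fit together, a sketch of an early-boot sequence (illustrative only; the addresses, sizes, and function name are made up, and real callers live in architecture setup code):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lmb.h>

static void __init example_lmb_setup(void)
{
	u64 addr;

	lmb_init();				/* start with empty memory/reserved lists */
	lmb_add(0x00000000, 0x40000000);	/* register 1 GB of RAM */
	lmb_reserve(0x01000000, 0x00100000);	/* keep a firmware region out of play */
	lmb_analyze();				/* recompute lmb.memory.size */

	/* 64 KB, 16-byte aligned, carved from the top of available memory;
	 * lmb_alloc() panics if it cannot satisfy the request. */
	addr = lmb_alloc(0x10000, 16);
	pr_debug("lmb: example allocation at 0x%llx\n", (unsigned long long)addr);
	lmb_dump_all();
}
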
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
114 | } | 114 | } |
115 | } | 115 | } |
116 | if (ret == NULL) | 116 | if (ret == NULL) |
117 | ret = kmem_cache_alloc(radix_tree_node_cachep, | 117 | ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
118 | set_migrateflags(gfp_mask, __GFP_RECLAIMABLE)); | ||
119 | 118 | ||
120 | BUG_ON(radix_tree_is_indirect_ptr(ret)); | 119 | BUG_ON(radix_tree_is_indirect_ptr(ret)); |
121 | return ret; | 120 | return ret; |
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
150 | rtp = &__get_cpu_var(radix_tree_preloads); | 149 | rtp = &__get_cpu_var(radix_tree_preloads); |
151 | while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { | 150 | while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { |
152 | preempt_enable(); | 151 | preempt_enable(); |
153 | node = kmem_cache_alloc(radix_tree_node_cachep, | 152 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); |
154 | set_migrateflags(gfp_mask, __GFP_RECLAIMABLE)); | ||
155 | if (node == NULL) | 153 | if (node == NULL) |
156 | goto out; | 154 | goto out; |
157 | preempt_disable(); | 155 | preempt_disable(); |
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
1098 | { | 1096 | { |
1099 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", | 1097 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
1100 | sizeof(struct radix_tree_node), 0, | 1098 | sizeof(struct radix_tree_node), 0, |
1101 | SLAB_PANIC, radix_tree_node_ctor); | 1099 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
1100 | radix_tree_node_ctor); | ||
1102 | radix_tree_init_maxindex(); | 1101 | radix_tree_init_maxindex(); |
1103 | hotcpu_notifier(radix_tree_callback, 0); | 1102 | hotcpu_notifier(radix_tree_callback, 0); |
1104 | } | 1103 | } |
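
The radix-tree hunks replace the per-allocation __GFP_RECLAIMABLE hint with SLAB_RECLAIM_ACCOUNT on the cache itself, so every node allocation is accounted as reclaimable without touching each gfp mask. A sketch of the same pattern for an unrelated, hypothetical cache:

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/* Reclaim accounting is a property of the cache, not of each call site. */
	example_cachep = kmem_cache_create("example_node", 64, 0,
					   SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
					   NULL);
	return 0;
}
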
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 3ea2db94d5b0..06d04cfa9339 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -45,7 +45,6 @@
45 | #include <linux/rslib.h> | 45 | #include <linux/rslib.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <asm/semaphore.h> | ||
49 | 48 | ||
50 | /* This list holds all currently allocated rs control structures */ | 49 | /* This list holds all currently allocated rs control structures */ |
51 | static LIST_HEAD (rslist); | 50 | static LIST_HEAD (rslist); |