Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             6
-rw-r--r--  lib/Kconfig.debug       6
-rw-r--r--  lib/Kconfig.kmemcheck  91
-rw-r--r--  lib/Makefile            4
-rw-r--r--  lib/atomic64.c        175
-rw-r--r--  lib/dec_and_lock.c      3
-rw-r--r--  lib/gcd.c              18
-rw-r--r--  lib/genalloc.c          1
-rw-r--r--  lib/hexdump.c          15
-rw-r--r--  lib/kobject.c           7
-rw-r--r--  lib/radix-tree.c      110
-rw-r--r--  lib/rbtree.c           34
12 files changed, 393 insertions, 77 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9960be04cbbe..bb1326d3839c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -194,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 config NLATTR
         bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+        bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 116a35051be6..6b0c2d8a2129 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
         bool "Debug slab memory allocations"
-        depends on DEBUG_KERNEL && SLAB
+        depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
         help
           Say Y here to have the kernel do limited verification on memory
           allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
         bool "SLUB debugging on by default"
-        depends on SLUB && SLUB_DEBUG
+        depends on SLUB && SLUB_DEBUG && !KMEMCHECK
         default n
         help
           Boot with debugging on by default. SLUB boots by default with
@@ -996,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 000000000000..603c81b66549
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+        bool
+
+menuconfig KMEMCHECK
+        bool "kmemcheck: trap use of uninitialized memory"
+        depends on DEBUG_KERNEL
+        depends on !X86_USE_3DNOW
+        depends on SLUB || SLAB
+        depends on !CC_OPTIMIZE_FOR_SIZE
+        depends on !FUNCTION_TRACER
+        select FRAME_POINTER
+        select STACKTRACE
+        default n
+        help
+          This option enables tracing of dynamically allocated kernel memory
+          to see if memory is used before it has been given an initial value.
+          Be aware that this requires half of your memory for bookkeeping and
+          will insert extra code at *every* read and write to tracked memory,
+          thus slowing down the kernel code (but user code is unaffected).
+
+          The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+          or enable kmemcheck at boot-time. If the kernel is started with
+          kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+        prompt "kmemcheck: default mode at boot"
+        depends on KMEMCHECK
+        default KMEMCHECK_ONESHOT_BY_DEFAULT
+        help
+          This option controls the default behaviour of kmemcheck when the
+          kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+        bool "disabled"
+        depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+        bool "enabled"
+        depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+        bool "one-shot"
+        depends on KMEMCHECK
+        help
+          In one-shot mode, only the first error detected is reported before
+          kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+        int "kmemcheck: error queue size"
+        depends on KMEMCHECK
+        default 64
+        help
+          Select the maximum number of errors to store in the queue. Since
+          errors can occur virtually anywhere and in any context, we need a
+          temporary storage area which is guaranteed not to generate any
+          other faults. The queue will be emptied as soon as a tasklet may
+          be scheduled. If the queue is full, new error reports will be
+          lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+        int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+        depends on KMEMCHECK
+        range 2 8
+        default 5
+        help
+          Select the number of shadow bytes to save along with each entry of
+          the queue. These bytes indicate what parts of an allocation are
+          initialized, uninitialized, etc. and will be displayed when an
+          error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+        bool "kmemcheck: allow partially uninitialized memory"
+        depends on KMEMCHECK
+        default y
+        help
+          This option works around certain GCC optimizations that produce
+          32-bit reads from 16-bit variables where the upper 16 bits are
+          thrown away afterwards. This may of course also hide some real
+          bugs.
+
+config KMEMCHECK_BITOPS_OK
+        bool "kmemcheck: allow bit-field manipulation"
+        depends on KMEMCHECK
+        default n
+        help
+          This option silences warnings that would be generated for bit-field
+          accesses where not all the bits are initialized at the same time.
+          This may also hide some real bugs.
+
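To make the kmemcheck help text concrete: the sketch below is illustrative only; the struct, field, and function names are invented and are not part of this patch. It shows the kind of use-before-initialization bug kmemcheck is designed to trap at the first read of memory that has never been written.

#include <linux/slab.h>

struct foo {
        int initialized;
        int not_initialized;
};

static int example_read_uninitialized(void)
{
        struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return -ENOMEM;
        f->initialized = 1;
        /* kmemcheck would warn here: this field was never written */
        return f->not_initialized;
}

Allocating with kzalloc() instead, or writing the field before reading it, would silence the report.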
diff --git a/lib/Makefile b/lib/Makefile
index 34c5c0e6222e..b6d1857bbf08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
          bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-         string_helpers.o
+         string_helpers.o gcd.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -95,6 +95,8 @@ obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000000..c5e725562416
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable.  Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS	16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+        spinlock_t lock;
+        char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+        unsigned long addr = (unsigned long) v;
+
+        addr >>= L1_CACHE_SHIFT;
+        addr ^= (addr >> 8) ^ (addr >> 16);
+        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        long long val;
+
+        spin_lock_irqsave(lock, flags);
+        val = v->counter;
+        spin_unlock_irqrestore(lock, flags);
+        return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+
+        spin_lock_irqsave(lock, flags);
+        v->counter = i;
+        spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+
+        spin_lock_irqsave(lock, flags);
+        v->counter += a;
+        spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        long long val;
+
+        spin_lock_irqsave(lock, flags);
+        val = v->counter += a;
+        spin_unlock_irqrestore(lock, flags);
+        return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+
+        spin_lock_irqsave(lock, flags);
+        v->counter -= a;
+        spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        long long val;
+
+        spin_lock_irqsave(lock, flags);
+        val = v->counter -= a;
+        spin_unlock_irqrestore(lock, flags);
+        return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        long long val;
+
+        spin_lock_irqsave(lock, flags);
+        val = v->counter - 1;
+        if (val >= 0)
+                v->counter = val;
+        spin_unlock_irqrestore(lock, flags);
+        return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        long long val;
+
+        spin_lock_irqsave(lock, flags);
+        val = v->counter;
+        if (val == o)
+                v->counter = n;
+        spin_unlock_irqrestore(lock, flags);
+        return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        long long val;
+
+        spin_lock_irqsave(lock, flags);
+        val = v->counter;
+        v->counter = new;
+        spin_unlock_irqrestore(lock, flags);
+        return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+        unsigned long flags;
+        spinlock_t *lock = lock_addr(v);
+        int ret = 1;
+
+        spin_lock_irqsave(lock, flags);
+        if (v->counter != u) {
+                v->counter += a;
+                ret = 0;
+        }
+        spin_unlock_irqrestore(lock, flags);
+        return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+        int i;
+
+        for (i = 0; i < NR_LOCKS; ++i)
+                spin_lock_init(&atomic64_lock[i].lock);
+        return 0;
+}
+
+pure_initcall(init_atomic64_lock);
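As a usage sketch (not part of the patch; the counter and function names are invented, and the ATOMIC64_INIT() initializer is assumed from the matching atomic64 headers): callers on an architecture that selects GENERIC_ATOMIC64 use exactly the same atomic64_t API as on architectures with native 64-bit atomics, and the hashed spinlocks above stay invisible to them.

#include <asm/atomic.h>

static atomic64_t bytes_written = ATOMIC64_INIT(0);

static long long account_write(long long nbytes)
{
        /* Serialized internally via the hashed spinlock for &bytes_written */
        return atomic64_add_return(nbytes, &bytes_written);
}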
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c31455541..e73822aa6e9a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
         /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
         if (atomic_add_unless(atomic, -1, 1))
                 return 0;
-#endif
+
         /* Otherwise do it the slow way */
         spin_lock(lock);
         if (atomic_dec_and_test(atomic))
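The caller-side pattern this helper serves is sketched below (the object type, fields, and lock are invented for illustration): the list lock is taken only when the reference count actually drops to zero, so the atomic_add_unless() fast path above keeps the common case lock-free.

#include <asm/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
        atomic_t refcount;
        struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);

static void obj_put(struct obj *o)
{
        /* Returns true only when the count hit zero, with the lock held */
        if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
                list_del(&o->node);
                spin_unlock(&obj_list_lock);
                kfree(o);
        }
}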
diff --git a/lib/gcd.c b/lib/gcd.c
new file mode 100644
index 000000000000..f879033d9822
--- /dev/null
+++ b/lib/gcd.c
@@ -0,0 +1,18 @@
+#include <linux/kernel.h>
+#include <linux/gcd.h>
+#include <linux/module.h>
+
+/* Greatest common divisor */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+        unsigned long r;
+
+        if (a < b)
+                swap(a, b);
+        while ((r = a % b) != 0) {
+                a = b;
+                b = r;
+        }
+        return b;
+}
+EXPORT_SYMBOL_GPL(gcd);
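A quick usage sketch (the helper name is invented, not from the patch): gcd() is typically used to reduce ratios or derive a least common multiple, for example when matching clock dividers or transfer sizes.

#include <linux/gcd.h>

static unsigned long lcm_of(unsigned long a, unsigned long b)
{
        /* For non-zero a and b; divide first so the product stays small */
        return (a / gcd(a, b)) * b;
}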
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d58..eed2bdb865e7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
         int bit, end_bit;
 
 
-        write_lock(&pool->lock);
         list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                 list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d26..39af2560f765 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
                 for (j = 0; j < ngroups; j++)
                         lx += scnprintf(linebuf + lx, linebuflen - lx,
-                                "%16.16llx ", (unsigned long long)*(ptr8 + j));
+                                "%s%16.16llx", j ? " " : "",
+                                (unsigned long long)*(ptr8 + j));
                 ascii_column = 17 * ngroups + 2;
                 break;
         }
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
                 for (j = 0; j < ngroups; j++)
                         lx += scnprintf(linebuf + lx, linebuflen - lx,
-                                "%8.8x ", *(ptr4 + j));
+                                "%s%8.8x", j ? " " : "", *(ptr4 + j));
                 ascii_column = 9 * ngroups + 2;
                 break;
         }
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
                 for (j = 0; j < ngroups; j++)
                         lx += scnprintf(linebuf + lx, linebuflen - lx,
-                                "%4.4x ", *(ptr2 + j));
+                                "%s%4.4x", j ? " " : "", *(ptr2 + j));
                 ascii_column = 5 * ngroups + 2;
                 break;
         }
 
         default:
-                for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
-                     j++) {
+                for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
                         ch = ptr[j];
                         linebuf[lx++] = hex_asc_hi(ch);
                         linebuf[lx++] = hex_asc_lo(ch);
                         linebuf[lx++] = ' ';
                 }
+                if (j)
+                        lx--;
+
                 ascii_column = 3 * rowsize + 2;
                 break;
         }
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
         while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
                 linebuf[lx++] = ' ';
-        for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
+        for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
                 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
                                 : '.';
 nil:
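For context, a sketch of a caller follows (assuming the hex_dump_to_buffer() prototype from <linux/kernel.h>; the buffer size and names here are arbitrary). With this patch the last hex group no longer carries a trailing blank, so nothing of the ASCII column is lost when the line buffer is tight.

#include <linux/kernel.h>

static void dump_row(const u8 *data, size_t len)
{
        char line[80];

        /* 16 bytes per row, 1-byte groups, ASCII column appended */
        hex_dump_to_buffer(data, min(len, (size_t)16), 16, 1,
                           line, sizeof(line), true);
        printk(KERN_DEBUG "row: %s\n", line);
}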
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe4f7a0..b512b746d2af 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
                                         struct kobject *parent_kobj)
 {
         struct kset *kset;
+        int retval;
 
         kset = kzalloc(sizeof(*kset), GFP_KERNEL);
         if (!kset)
                 return NULL;
-        kobject_set_name(&kset->kobj, name);
+        retval = kobject_set_name(&kset->kobj, name);
+        if (retval) {
+                kfree(kset);
+                return NULL;
+        }
         kset->uevent_ops = uevent_ops;
         kset->kobj.parent = parent_kobj;
 
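For reference, kset_create() is the internal helper behind the exported kset_create_and_add(); a typical caller looks like the sketch below (the kset name is invented). With the fix above, a failing kobject_set_name() no longer leaks the freshly allocated kset, and the caller simply observes NULL.

#include <linux/kobject.h>
#include <linux/init.h>

static struct kset *example_kset;

static int __init example_init(void)
{
        /* Creates /sys/kernel/example when it succeeds */
        example_kset = kset_create_and_add("example", NULL, kernel_kobj);
        if (!example_kset)
                return -ENOMEM;
        return 0;
}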
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344ec..23abbd93cae1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/**
- * radix_tree_lookup_slot - lookup a slot in a radix tree
- * @root: radix tree root
- * @index: index key
- *
- * Returns: the slot corresponding to the position @index in the
- * radix tree @root. This is useful for update-if-exists operations.
- *
- * This function can be called under rcu_read_lock iff the slot is not
- * modified by radix_tree_replace_slot, otherwise it must be called
- * exclusive from other writers. Any dereference of the slot must be done
- * using radix_tree_deref_slot.
+/*
+ * is_slot == 1 : search for the slot.
+ * is_slot == 0 : search for the node.
  */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+static void *radix_tree_lookup_element(struct radix_tree_root *root,
+                                unsigned long index, int is_slot)
 {
         unsigned int height, shift;
         struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
         if (!radix_tree_is_indirect_ptr(node)) {
                 if (index > 0)
                         return NULL;
-                return (void **)&root->rnode;
+                return is_slot ? (void *)&root->rnode : node;
         }
         node = radix_tree_indirect_to_ptr(node);
 
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
                 height--;
         } while (height > 0);
 
-        return (void **)slot;
+        return is_slot ? (void *)slot : node;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Returns: the slot corresponding to the position @index in the
+ * radix tree @root. This is useful for update-if-exists operations.
+ *
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+        return (void **)radix_tree_lookup_element(root, index, 1);
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-        unsigned int height, shift;
-        struct radix_tree_node *node, **slot;
-
-        node = rcu_dereference(root->rnode);
-        if (node == NULL)
-                return NULL;
-
-        if (!radix_tree_is_indirect_ptr(node)) {
-                if (index > 0)
-                        return NULL;
-                return node;
-        }
-        node = radix_tree_indirect_to_ptr(node);
-
-        height = node->height;
-        if (index > radix_tree_maxindex(height))
-                return NULL;
-
-        shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
-        do {
-                slot = (struct radix_tree_node **)
-                        (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-                node = rcu_dereference(*slot);
-                if (node == NULL)
-                        return NULL;
-
-                shift -= RADIX_TREE_MAP_SHIFT;
-                height--;
-        } while (height > 0);
-
-        return node;
+        return radix_tree_lookup_element(root, index, 0);
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_next_hole);
 
+/**
+ * radix_tree_prev_hole - find the prev hole (not-present entry)
+ * @root: tree root
+ * @index: index key
+ * @max_scan: maximum range to search
+ *
+ * Search backwards in the range [max(index-max_scan+1, 0), index]
+ * for the first hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'index - return >= max_scan'
+ * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *
+ * radix_tree_prev_hole may be called under rcu_read_lock. However, like
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 10, then subsequently a hole is created at index 5,
+ * radix_tree_prev_hole covering both indexes may return 5 if called under
+ * rcu_read_lock.
+ */
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+                                   unsigned long index, unsigned long max_scan)
+{
+        unsigned long i;
+
+        for (i = 0; i < max_scan; i++) {
+                if (!radix_tree_lookup(root, index))
+                        break;
+                index--;
+                if (index == LONG_MAX)
+                        break;
+        }
+
+        return index;
+}
+EXPORT_SYMBOL(radix_tree_prev_hole);
+
 static unsigned int
 __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
         unsigned int max_items, unsigned long *next_index)
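A usage sketch for the new helper (the function and variable names are invented; sizing a backward readahead window is just one plausible use): scan back from an index for the first missing entry.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static unsigned long backward_window(struct radix_tree_root *root,
                                     unsigned long index, unsigned long max)
{
        unsigned long hole;

        rcu_read_lock();
        hole = radix_tree_prev_hole(root, index, max);
        rcu_read_unlock();

        /* No hole within 'max' entries behind 'index': cap the window */
        if (index - hole >= max)
                return max;
        return index - hole;
}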
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659e0bc1..e2aa3be29858 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
                 node = node->rb_right;
                 while ((left = node->rb_left) != NULL)
                         node = left;
+
+                if (rb_parent(old)) {
+                        if (rb_parent(old)->rb_left == old)
+                                rb_parent(old)->rb_left = node;
+                        else
+                                rb_parent(old)->rb_right = node;
+                } else
+                        root->rb_node = node;
+
                 child = node->rb_right;
                 parent = rb_parent(node);
                 color = rb_color(node);
 
-                if (child)
-                        rb_set_parent(child, parent);
                 if (parent == old) {
-                        parent->rb_right = child;
                         parent = node;
-                } else
+                } else {
+                        if (child)
+                                rb_set_parent(child, parent);
                         parent->rb_left = child;
 
+                        node->rb_right = old->rb_right;
+                        rb_set_parent(old->rb_right, node);
+                }
+
                 node->rb_parent_color = old->rb_parent_color;
-                node->rb_right = old->rb_right;
                 node->rb_left = old->rb_left;
-
-                if (rb_parent(old))
-                {
-                        if (rb_parent(old)->rb_left == old)
-                                rb_parent(old)->rb_left = node;
-                        else
-                                rb_parent(old)->rb_right = node;
-                } else
-                        root->rb_node = node;
-
                 rb_set_parent(old->rb_left, node);
-                if (old->rb_right)
-                        rb_set_parent(old->rb_right, node);
+
                 goto color;
         }
 