Diffstat (limited to 'lib')
 lib/Kconfig            |   6
 lib/Kconfig.debug      |  25
 lib/Kconfig.kmemcheck  |  91
 lib/Makefile           |   6
 lib/atomic64.c         | 175
 lib/checksum.c         | 201
 lib/dec_and_lock.c     |   3
 lib/dma-debug.c        | 175
 lib/extable.c          |  21
 lib/gcd.c              |  18
 lib/genalloc.c         |   1
 lib/hexdump.c          |  15
 lib/kobject.c          |   7
 lib/radix-tree.c       | 110
 lib/rbtree.c           |  34
 15 files changed, 743 insertions(+), 145 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 9960be04cbbe..bb1326d3839c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -194,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 config NLATTR
 	bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 116a35051be6..12327b2bb785 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -340,8 +340,6 @@ config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
 		!MEMORY_HOTPLUG
-	select DEBUG_SLAB if SLAB
-	select SLUB_DEBUG if SLUB
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -355,9 +353,24 @@ config DEBUG_KMEMLEAK
 	  allocations. See Documentation/kmemleak.txt for more
 	  details.
 
+	  Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
+	  of finding leaks due to the slab objects poisoning.
+
 	  In order to access the kmemleak file, debugfs needs to be
 	  mounted (usually at /sys/kernel/debug).
 
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+	int "Maximum kmemleak early log entries"
+	depends on DEBUG_KMEMLEAK
+	range 200 2000
+	default 400
+	help
+	  Kmemleak must track all the memory allocations to avoid
+	  reporting false positives. Since memory may be allocated or
+	  freed before kmemleak is initialised, an early log buffer is
+	  used to store these actions. If kmemleak reports "early log
+	  buffer exceeded", please increase this value.
+
 config DEBUG_KMEMLEAK_TEST
 	tristate "Simple test for the kernel memory leak detector"
 	depends on DEBUG_KMEMLEAK
@@ -472,7 +485,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -996,3 +1009,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 000000000000..603c81b66549
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
1config HAVE_ARCH_KMEMCHECK
2 bool
3
4menuconfig KMEMCHECK
5 bool "kmemcheck: trap use of uninitialized memory"
6 depends on DEBUG_KERNEL
7 depends on !X86_USE_3DNOW
8 depends on SLUB || SLAB
9 depends on !CC_OPTIMIZE_FOR_SIZE
10 depends on !FUNCTION_TRACER
11 select FRAME_POINTER
12 select STACKTRACE
13 default n
14 help
15 This option enables tracing of dynamically allocated kernel memory
16 to see if memory is used before it has been given an initial value.
17 Be aware that this requires half of your memory for bookkeeping and
18 will insert extra code at *every* read and write to tracked memory,
19 thus slowing down the kernel code (but user code is unaffected).
20
21 The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
22 or enable kmemcheck at boot-time. If the kernel is started with
23 kmemcheck=0, the large memory and CPU overhead is not incurred.
24
25choice
26 prompt "kmemcheck: default mode at boot"
27 depends on KMEMCHECK
28 default KMEMCHECK_ONESHOT_BY_DEFAULT
29 help
30 This option controls the default behaviour of kmemcheck when the
31 kernel boots and no kmemcheck= parameter is given.
32
33config KMEMCHECK_DISABLED_BY_DEFAULT
34 bool "disabled"
35 depends on KMEMCHECK
36
37config KMEMCHECK_ENABLED_BY_DEFAULT
38 bool "enabled"
39 depends on KMEMCHECK
40
41config KMEMCHECK_ONESHOT_BY_DEFAULT
42 bool "one-shot"
43 depends on KMEMCHECK
44 help
45 In one-shot mode, only the first error detected is reported before
46 kmemcheck is disabled.
47
48endchoice
49
50config KMEMCHECK_QUEUE_SIZE
51 int "kmemcheck: error queue size"
52 depends on KMEMCHECK
53 default 64
54 help
55 Select the maximum number of errors to store in the queue. Since
56 errors can occur virtually anywhere and in any context, we need a
57 temporary storage area which is guaranteed not to generate any
58 other faults. The queue will be emptied as soon as a tasklet may
59 be scheduled. If the queue is full, new error reports will be
60 lost.
61
62config KMEMCHECK_SHADOW_COPY_SHIFT
63 int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
64 depends on KMEMCHECK
65 range 2 8
66 default 5
67 help
68 Select the number of shadow bytes to save along with each entry of
69 the queue. These bytes indicate what parts of an allocation are
70 initialized, uninitialized, etc. and will be displayed when an
71 error is detected to help the debugging of a particular problem.
72
73config KMEMCHECK_PARTIAL_OK
74 bool "kmemcheck: allow partially uninitialized memory"
75 depends on KMEMCHECK
76 default y
77 help
78 This option works around certain GCC optimizations that produce
79 32-bit reads from 16-bit variables where the upper 16 bits are
80 thrown away afterwards. This may of course also hide some real
81 bugs.
82
83config KMEMCHECK_BITOPS_OK
84 bool "kmemcheck: allow bit-field manipulation"
85 depends on KMEMCHECK
86 default n
87 help
88 This option silences warnings that would be generated for bit-field
89 accesses where not all the bits are initialized at the same time.
90 This may also hide some real bugs.
91
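The shadow bytes described under KMEMCHECK_SHADOW_COPY_SHIFT record which parts of an allocation have been written before they are read. As a rough userspace analogue of that idea (kmemcheck itself works through page faults and per-page shadow memory; every name below is hypothetical), one shadow byte per data byte can flag reads of never-initialized bytes:

#include <stdio.h>
#include <stdlib.h>

/* One shadow byte per data byte: 0 = uninitialized, 1 = initialized. */
struct tracked_buf {
	unsigned char *data;
	unsigned char *shadow;
	size_t size;
};

static struct tracked_buf *tracked_alloc(size_t size)
{
	struct tracked_buf *b = malloc(sizeof(*b));

	b->data = malloc(size);		/* contents deliberately left undefined */
	b->shadow = calloc(size, 1);	/* everything starts "uninitialized" */
	b->size = size;
	return b;
}

static void tracked_write(struct tracked_buf *b, size_t off, unsigned char val)
{
	b->data[off] = val;
	b->shadow[off] = 1;		/* writing a byte initializes it */
}

static unsigned char tracked_read(struct tracked_buf *b, size_t off)
{
	if (!b->shadow[off])
		fprintf(stderr, "use of uninitialized byte at offset %zu\n", off);
	return b->data[off];
}

int main(void)
{
	struct tracked_buf *b = tracked_alloc(4);

	tracked_write(b, 0, 0xab);
	tracked_read(b, 0);		/* fine: byte 0 was written first */
	tracked_read(b, 1);		/* reported: byte 1 was never initialized */
	return 0;
}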
diff --git a/lib/Makefile b/lib/Makefile
index 1f6edefebffe..b6d1857bbf08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o
+	 string_helpers.o gcd.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -93,6 +93,10 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
+obj-$(CONFIG_GENERIC_CSUM) += checksum.o
+
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000000..c5e725562416
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
1/*
2 * Generic implementation of 64-bit atomics using spinlocks,
3 * useful on processors that don't have 64-bit atomic instructions.
4 *
5 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/types.h>
13#include <linux/cache.h>
14#include <linux/spinlock.h>
15#include <linux/init.h>
16#include <asm/atomic.h>
17
18/*
19 * We use a hashed array of spinlocks to provide exclusive access
20 * to each atomic64_t variable. Since this is expected to be used on
21 * systems with small numbers of CPUs (<= 4 or so), we use a
22 * relatively small array of 16 spinlocks to avoid wasting too much
23 * memory on the spinlock array.
24 */
25#define NR_LOCKS 16
26
27/*
28 * Ensure each lock is in a separate cacheline.
29 */
30static union {
31 spinlock_t lock;
32 char pad[L1_CACHE_BYTES];
33} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
34
35static inline spinlock_t *lock_addr(const atomic64_t *v)
36{
37 unsigned long addr = (unsigned long) v;
38
39 addr >>= L1_CACHE_SHIFT;
40 addr ^= (addr >> 8) ^ (addr >> 16);
41 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
42}
43
44long long atomic64_read(const atomic64_t *v)
45{
46 unsigned long flags;
47 spinlock_t *lock = lock_addr(v);
48 long long val;
49
50 spin_lock_irqsave(lock, flags);
51 val = v->counter;
52 spin_unlock_irqrestore(lock, flags);
53 return val;
54}
55
56void atomic64_set(atomic64_t *v, long long i)
57{
58 unsigned long flags;
59 spinlock_t *lock = lock_addr(v);
60
61 spin_lock_irqsave(lock, flags);
62 v->counter = i;
63 spin_unlock_irqrestore(lock, flags);
64}
65
66void atomic64_add(long long a, atomic64_t *v)
67{
68 unsigned long flags;
69 spinlock_t *lock = lock_addr(v);
70
71 spin_lock_irqsave(lock, flags);
72 v->counter += a;
73 spin_unlock_irqrestore(lock, flags);
74}
75
76long long atomic64_add_return(long long a, atomic64_t *v)
77{
78 unsigned long flags;
79 spinlock_t *lock = lock_addr(v);
80 long long val;
81
82 spin_lock_irqsave(lock, flags);
83 val = v->counter += a;
84 spin_unlock_irqrestore(lock, flags);
85 return val;
86}
87
88void atomic64_sub(long long a, atomic64_t *v)
89{
90 unsigned long flags;
91 spinlock_t *lock = lock_addr(v);
92
93 spin_lock_irqsave(lock, flags);
94 v->counter -= a;
95 spin_unlock_irqrestore(lock, flags);
96}
97
98long long atomic64_sub_return(long long a, atomic64_t *v)
99{
100 unsigned long flags;
101 spinlock_t *lock = lock_addr(v);
102 long long val;
103
104 spin_lock_irqsave(lock, flags);
105 val = v->counter -= a;
106 spin_unlock_irqrestore(lock, flags);
107 return val;
108}
109
110long long atomic64_dec_if_positive(atomic64_t *v)
111{
112 unsigned long flags;
113 spinlock_t *lock = lock_addr(v);
114 long long val;
115
116 spin_lock_irqsave(lock, flags);
117 val = v->counter - 1;
118 if (val >= 0)
119 v->counter = val;
120 spin_unlock_irqrestore(lock, flags);
121 return val;
122}
123
124long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
125{
126 unsigned long flags;
127 spinlock_t *lock = lock_addr(v);
128 long long val;
129
130 spin_lock_irqsave(lock, flags);
131 val = v->counter;
132 if (val == o)
133 v->counter = n;
134 spin_unlock_irqrestore(lock, flags);
135 return val;
136}
137
138long long atomic64_xchg(atomic64_t *v, long long new)
139{
140 unsigned long flags;
141 spinlock_t *lock = lock_addr(v);
142 long long val;
143
144 spin_lock_irqsave(lock, flags);
145 val = v->counter;
146 v->counter = new;
147 spin_unlock_irqrestore(lock, flags);
148 return val;
149}
150
151int atomic64_add_unless(atomic64_t *v, long long a, long long u)
152{
153 unsigned long flags;
154 spinlock_t *lock = lock_addr(v);
155 int ret = 1;
156
157 spin_lock_irqsave(lock, flags);
158 if (v->counter != u) {
159 v->counter += a;
160 ret = 0;
161 }
162 spin_unlock_irqrestore(lock, flags);
163 return ret;
164}
165
166static int init_atomic64_lock(void)
167{
168 int i;
169
170 for (i = 0; i < NR_LOCKS; ++i)
171 spin_lock_init(&atomic64_lock[i].lock);
172 return 0;
173}
174
175pure_initcall(init_atomic64_lock);
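lock_addr() above hashes the variable's address into a small array of spinlocks: the low cache-line bits are discarded and higher bits are folded in, so distinct atomic64_t variables spread across the 16 locks. A minimal userspace sketch of the same hash-then-lock idea, using pthreads instead of kernel spinlocks (illustrative only, not the kernel API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS	16
#define CACHE_SHIFT	6	/* assume 64-byte cache lines */

/* One lock per slot; a real implementation would pad each to a cacheline. */
static pthread_mutex_t locks[NR_LOCKS];

struct my_atomic64 { long long counter; };

/* Same hashing idea as lock_addr(): drop the low (cache-line) bits, then
 * fold higher bits in so addresses map roughly evenly over the lock array. */
static pthread_mutex_t *lock_for(const struct my_atomic64 *v)
{
	uintptr_t addr = (uintptr_t)v >> CACHE_SHIFT;

	addr ^= (addr >> 8) ^ (addr >> 16);
	return &locks[addr & (NR_LOCKS - 1)];
}

static long long my_atomic64_add_return(long long a, struct my_atomic64 *v)
{
	pthread_mutex_t *lock = lock_for(v);
	long long val;

	pthread_mutex_lock(lock);
	val = v->counter += a;
	pthread_mutex_unlock(lock);
	return val;
}

int main(void)
{
	struct my_atomic64 v = { 40 };
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		pthread_mutex_init(&locks[i], NULL);
	printf("%lld\n", my_atomic64_add_return(2, &v));	/* 42 */
	return 0;
}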
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 000000000000..b2e2fd468461
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,201 @@
1/*
2 *
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * IP/TCP/UDP checksumming routines
8 *
9 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11 * Tom May, <ftom@netcom.com>
12 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
13 * Lots of code moved from tcp.c and ip.c; see those files
14 * for more names.
15 *
16 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
17 * Fixed some nasty bugs, causing some horrible crashes.
18 * A: At some points, the sum (%0) was used as
19 * length-counter instead of the length counter
20 * (%1). Thanks to Roman Hodek for pointing this out.
21 * B: GCC seems to mess up if one uses too many
22 * data-registers to hold input values and one tries to
23 * specify d0 and d1 as scratch registers. Letting gcc
24 * choose these registers itself solves the problem.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version.
30 */
31
32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
33 kills, so most of the assembly has to go. */
34
35#include <linux/module.h>
36#include <net/checksum.h>
37
38#include <asm/byteorder.h>
39
40static inline unsigned short from32to16(unsigned long x)
41{
42 /* add up 16-bit and 16-bit for 16+c bit */
43 x = (x & 0xffff) + (x >> 16);
44 /* add up carry.. */
45 x = (x & 0xffff) + (x >> 16);
46 return x;
47}
48
49static unsigned int do_csum(const unsigned char *buff, int len)
50{
51 int odd, count;
52 unsigned long result = 0;
53
54 if (len <= 0)
55 goto out;
56 odd = 1 & (unsigned long) buff;
57 if (odd) {
58#ifdef __LITTLE_ENDIAN
59 result = *buff;
60#else
61 result += (*buff << 8);
62#endif
63 len--;
64 buff++;
65 }
66 count = len >> 1; /* nr of 16-bit words.. */
67 if (count) {
68 if (2 & (unsigned long) buff) {
69 result += *(unsigned short *) buff;
70 count--;
71 len -= 2;
72 buff += 2;
73 }
74 count >>= 1; /* nr of 32-bit words.. */
75 if (count) {
76 unsigned long carry = 0;
77 do {
78 unsigned long w = *(unsigned int *) buff;
79 count--;
80 buff += 4;
81 result += carry;
82 result += w;
83 carry = (w > result);
84 } while (count);
85 result += carry;
86 result = (result & 0xffff) + (result >> 16);
87 }
88 if (len & 2) {
89 result += *(unsigned short *) buff;
90 buff += 2;
91 }
92 }
93 if (len & 1)
94#ifdef __LITTLE_ENDIAN
95 result += *buff;
96#else
97 result += (*buff << 8);
98#endif
99 result = from32to16(result);
100 if (odd)
101 result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
102out:
103 return result;
104}
105
106/*
107 * This is a version of ip_compute_csum() optimized for IP headers,
108 * which always checksum on 4 octet boundaries.
109 */
110__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
111{
112 return (__force __sum16)~do_csum(iph, ihl*4);
113}
114EXPORT_SYMBOL(ip_fast_csum);
115
116/*
117 * computes the checksum of a memory block at buff, length len,
118 * and adds in "sum" (32-bit)
119 *
120 * returns a 32-bit number suitable for feeding into itself
121 * or csum_tcpudp_magic
122 *
123 * this function must be called with even lengths, except
124 * for the last fragment, which may be odd
125 *
126 * it's best to have buff aligned on a 32-bit boundary
127 */
128__wsum csum_partial(const void *buff, int len, __wsum wsum)
129{
130 unsigned int sum = (__force unsigned int)wsum;
131 unsigned int result = do_csum(buff, len);
132
133 /* add in old sum, and carry.. */
134 result += sum;
135 if (sum > result)
136 result += 1;
137 return (__force __wsum)result;
138}
139EXPORT_SYMBOL(csum_partial);
140
141/*
142 * this routine is used for miscellaneous IP-like checksums, mainly
143 * in icmp.c
144 */
145__sum16 ip_compute_csum(const void *buff, int len)
146{
147 return (__force __sum16)~do_csum(buff, len);
148}
149EXPORT_SYMBOL(ip_compute_csum);
150
151/*
152 * copy from fs while checksumming, otherwise like csum_partial
153 */
154__wsum
155csum_partial_copy_from_user(const void __user *src, void *dst, int len,
156 __wsum sum, int *csum_err)
157{
158 int missing;
159
160 missing = __copy_from_user(dst, src, len);
161 if (missing) {
162 memset(dst + len - missing, 0, missing);
163 *csum_err = -EFAULT;
164 } else
165 *csum_err = 0;
166
167 return csum_partial(dst, len, sum);
168}
169EXPORT_SYMBOL(csum_partial_copy_from_user);
170
171/*
172 * copy from ds while checksumming, otherwise like csum_partial
173 */
174__wsum
175csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
176{
177 memcpy(dst, src, len);
178 return csum_partial(dst, len, sum);
179}
180EXPORT_SYMBOL(csum_partial_copy);
181
182#ifndef csum_tcpudp_nofold
183__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
184 unsigned short len,
185 unsigned short proto,
186 __wsum sum)
187{
188 unsigned long long s = (__force u32)sum;
189
190 s += (__force u32)saddr;
191 s += (__force u32)daddr;
192#ifdef __BIG_ENDIAN
193 s += proto + len;
194#else
195 s += (proto + len) << 8;
196#endif
197 s += (s >> 32);
198 return (__force __wsum)s;
199}
200EXPORT_SYMBOL(csum_tcpudp_nofold);
201#endif
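csum_partial() returns an unfolded 32-bit ones'-complement sum; a caller folds the carries back into 16 bits and inverts the result to obtain the final Internet checksum (the ip_compute_csum()/ip_fast_csum() step). A small userspace sketch of that fold-and-invert step, using the worked example from RFC 1071 (not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum of 16-bit words, kept in 32 bits (like do_csum()). */
static uint32_t sum16(const uint8_t *buf, int len)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)(buf[len - 1] << 8);	/* pad the odd byte */
	return sum;
}

/* Fold the carries back in and invert -- the csum_fold() step. */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* RFC 1071 example: the folded sum of these words is 0xddf2,
	 * so the transmitted checksum is ~0xddf2 = 0x220d. */
	uint8_t data[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

	printf("0x%04x\n", fold(sum16(data, sizeof(data))));	/* 0x220d */
	return 0;
}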
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c31455541..e73822aa6e9a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
 	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
 	if (atomic_add_unless(atomic, -1, 1))
 		return 0;
-#endif
+
 	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
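With a generic atomic_add_unless() now available everywhere, _atomic_dec_and_lock() can always try the lock-free fast path first and only take the spinlock when the count is about to hit zero. A userspace analogue of that pattern with C11 atomics and a pthread mutex (names are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Decrement a refcount; only take the lock when the count may reach zero.
 * Returns true with the mutex held if this caller dropped the last reference. */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
	int old = atomic_load(count);

	/* Fast path: decrement without the lock unless old == 1. */
	while (old != 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;
	}

	/* Slow path: take the lock, then do the final decrement. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;		/* caller must tear down, then unlock */
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	atomic_int refs = 2;

	printf("%d\n", dec_and_lock(&refs, &lock));	/* 0: count 2 -> 1, no lock taken */
	if (dec_and_lock(&refs, &lock)) {		/* last reference dropped */
		/* ... free the object here ... */
		pthread_mutex_unlock(&lock);
	}
	return 0;
}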
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0317d9..65b0d99b6d0a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 		 */
 		matches += 1;
 		match_lvl = 0;
-		entry->size      == ref->size      ? ++match_lvl : match_lvl;
-		entry->type      == ref->type      ? ++match_lvl : match_lvl;
-		entry->direction == ref->direction ? ++match_lvl : match_lvl;
+		entry->size         == ref->size         ? ++match_lvl : 0;
+		entry->type         == ref->type         ? ++match_lvl : 0;
+		entry->direction    == ref->direction    ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
 
-		if (match_lvl == 3) {
+		if (match_lvl == 4) {
 			/* perfect-fit - return the result */
 			return entry;
 		} else if (match_lvl > last_lvl) {
@@ -715,7 +716,7 @@ void dma_debug_init(u32 num_entries)
 
 	for (i = 0; i < HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
-		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
 	if (dma_debug_fs_init() != 0) {
@@ -855,90 +856,85 @@ static void check_for_stack(struct device *dev, void *addr)
855 "stack [addr=%p]\n", addr); 856 "stack [addr=%p]\n", addr);
856} 857}
857 858
858static inline bool overlap(void *addr, u64 size, void *start, void *end) 859static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
859{ 860{
860 void *addr2 = (char *)addr + size; 861 unsigned long a1 = (unsigned long)addr;
862 unsigned long b1 = a1 + len;
863 unsigned long a2 = (unsigned long)start;
864 unsigned long b2 = (unsigned long)end;
861 865
862 return ((addr >= start && addr < end) || 866 return !(b1 <= a2 || a1 >= b2);
863 (addr2 >= start && addr2 < end) ||
864 ((addr < start) && (addr2 >= end)));
865} 867}
866 868
867static void check_for_illegal_area(struct device *dev, void *addr, u64 size) 869static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
868{ 870{
869 if (overlap(addr, size, _text, _etext) || 871 if (overlap(addr, len, _text, _etext) ||
870 overlap(addr, size, __start_rodata, __end_rodata)) 872 overlap(addr, len, __start_rodata, __end_rodata))
871 err_printk(dev, NULL, "DMA-API: device driver maps " 873 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
872 "memory from kernel text or rodata "
873 "[addr=%p] [size=%llu]\n", addr, size);
874} 874}
875 875
876static void check_sync(struct device *dev, dma_addr_t addr, 876static void check_sync(struct device *dev,
877 u64 size, u64 offset, int direction, bool to_cpu) 877 struct dma_debug_entry *ref,
878 bool to_cpu)
878{ 879{
879 struct dma_debug_entry ref = {
880 .dev = dev,
881 .dev_addr = addr,
882 .size = size,
883 .direction = direction,
884 };
885 struct dma_debug_entry *entry; 880 struct dma_debug_entry *entry;
886 struct hash_bucket *bucket; 881 struct hash_bucket *bucket;
887 unsigned long flags; 882 unsigned long flags;
888 883
889 bucket = get_hash_bucket(&ref, &flags); 884 bucket = get_hash_bucket(ref, &flags);
890 885
891 entry = hash_bucket_find(bucket, &ref); 886 entry = hash_bucket_find(bucket, ref);
892 887
893 if (!entry) { 888 if (!entry) {
894 err_printk(dev, NULL, "DMA-API: device driver tries " 889 err_printk(dev, NULL, "DMA-API: device driver tries "
895 "to sync DMA memory it has not allocated " 890 "to sync DMA memory it has not allocated "
896 "[device address=0x%016llx] [size=%llu bytes]\n", 891 "[device address=0x%016llx] [size=%llu bytes]\n",
897 (unsigned long long)addr, size); 892 (unsigned long long)ref->dev_addr, ref->size);
898 goto out; 893 goto out;
899 } 894 }
900 895
901 if ((offset + size) > entry->size) { 896 if (ref->size > entry->size) {
902 err_printk(dev, entry, "DMA-API: device driver syncs" 897 err_printk(dev, entry, "DMA-API: device driver syncs"
903 " DMA memory outside allocated range " 898 " DMA memory outside allocated range "
904 "[device address=0x%016llx] " 899 "[device address=0x%016llx] "
905 "[allocation size=%llu bytes] [sync offset=%llu] " 900 "[allocation size=%llu bytes] "
906 "[sync size=%llu]\n", entry->dev_addr, entry->size, 901 "[sync offset+size=%llu]\n",
907 offset, size); 902 entry->dev_addr, entry->size,
903 ref->size);
908 } 904 }
909 905
910 if (direction != entry->direction) { 906 if (ref->direction != entry->direction) {
911 err_printk(dev, entry, "DMA-API: device driver syncs " 907 err_printk(dev, entry, "DMA-API: device driver syncs "
912 "DMA memory with different direction " 908 "DMA memory with different direction "
913 "[device address=0x%016llx] [size=%llu bytes] " 909 "[device address=0x%016llx] [size=%llu bytes] "
914 "[mapped with %s] [synced with %s]\n", 910 "[mapped with %s] [synced with %s]\n",
915 (unsigned long long)addr, entry->size, 911 (unsigned long long)ref->dev_addr, entry->size,
916 dir2name[entry->direction], 912 dir2name[entry->direction],
917 dir2name[direction]); 913 dir2name[ref->direction]);
918 } 914 }
919 915
920 if (entry->direction == DMA_BIDIRECTIONAL) 916 if (entry->direction == DMA_BIDIRECTIONAL)
921 goto out; 917 goto out;
922 918
923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 919 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
924 !(direction == DMA_TO_DEVICE)) 920 !(ref->direction == DMA_TO_DEVICE))
925 err_printk(dev, entry, "DMA-API: device driver syncs " 921 err_printk(dev, entry, "DMA-API: device driver syncs "
926 "device read-only DMA memory for cpu " 922 "device read-only DMA memory for cpu "
927 "[device address=0x%016llx] [size=%llu bytes] " 923 "[device address=0x%016llx] [size=%llu bytes] "
928 "[mapped with %s] [synced with %s]\n", 924 "[mapped with %s] [synced with %s]\n",
929 (unsigned long long)addr, entry->size, 925 (unsigned long long)ref->dev_addr, entry->size,
930 dir2name[entry->direction], 926 dir2name[entry->direction],
931 dir2name[direction]); 927 dir2name[ref->direction]);
932 928
933 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 929 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
934 !(direction == DMA_FROM_DEVICE)) 930 !(ref->direction == DMA_FROM_DEVICE))
935 err_printk(dev, entry, "DMA-API: device driver syncs " 931 err_printk(dev, entry, "DMA-API: device driver syncs "
936 "device write-only DMA memory to device " 932 "device write-only DMA memory to device "
937 "[device address=0x%016llx] [size=%llu bytes] " 933 "[device address=0x%016llx] [size=%llu bytes] "
938 "[mapped with %s] [synced with %s]\n", 934 "[mapped with %s] [synced with %s]\n",
939 (unsigned long long)addr, entry->size, 935 (unsigned long long)ref->dev_addr, entry->size,
940 dir2name[entry->direction], 936 dir2name[entry->direction],
941 dir2name[direction]); 937 dir2name[ref->direction]);
942 938
943out: 939out:
944 put_hash_bucket(bucket, &flags); 940 put_hash_bucket(bucket, &flags);
@@ -972,7 +968,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->type      = dma_debug_single;
 
 	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
 		check_for_stack(dev, addr);
 		check_for_illegal_area(dev, addr, size);
 	}
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry, ref;
+	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int mapped_ents;
 
-	ref.dev      = dev;
-	ref.dev_addr = sg_dma_address(s);
-	ref.size     = sg_dma_len(s),
-
-	bucket       = get_hash_bucket(&ref, &flags);
-	entry        = hash_bucket_find(bucket, &ref);
+	bucket       = get_hash_bucket(ref, &flags);
+	entry        = hash_bucket_find(bucket, ref);
 	mapped_ents  = 0;
 
 	if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = dir,
-			.sg_call_ents   = 0,
+			.sg_call_ents   = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (!i) {
-			ref.sg_call_ents = nelems;
-			mapped_ents      = get_nr_mapped_entries(dev, s);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 					     dma_addr_t dma_handle, size_t size,
 					     int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 					  unsigned long offset, size_t size,
 					  int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
 
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 						  unsigned long offset,
 						  size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, offset, direction, false);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = offset + size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, false);
 }
 EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
 
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, true);
+		check_sync(dev, &ref, true);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
+
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_sg,
+			.dev            = dev,
+			.paddr          = sg_phys(s),
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
 		if (!i)
-			mapped_ents = get_nr_mapped_entries(dev, s);
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		if (i >= mapped_ents)
 			break;
 
-		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-			   direction, false);
+		check_sync(dev, &ref, false);
 	}
 }
 EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
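The rewritten overlap() above treats the mapping and the forbidden section as half-open address ranges and reports a conflict unless one range ends before the other begins. A tiny userspace sketch of the same predicate with a few self-checks (names are illustrative):

#include <assert.h>
#include <stdbool.h>

/* Half-open intervals [a1, a1+len) and [a2, b2) overlap unless one ends
 * before the other begins -- the same test as the new overlap(). */
static bool ranges_overlap(unsigned long a1, unsigned long len,
			   unsigned long a2, unsigned long b2)
{
	unsigned long b1 = a1 + len;

	return !(b1 <= a2 || a1 >= b2);
}

int main(void)
{
	assert(ranges_overlap(100, 10, 105, 200));	/* partial overlap */
	assert(ranges_overlap(100, 100, 120, 130));	/* fully contains the section */
	assert(!ranges_overlap(100, 10, 110, 120));	/* exactly adjacent: no overlap */
	assert(!ranges_overlap(100, 10, 0, 50));	/* disjoint */
	return 0;
}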
diff --git a/lib/extable.c b/lib/extable.c
index 179c08745595..4cac81ec225e 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start,
 	sort(start, finish - start, sizeof(struct exception_table_entry),
 	     cmp_ex, NULL);
 }
-#endif
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any referring to the module init
+ * will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+	/*trim the beginning*/
+	while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
+		m->extable++;
+		m->num_exentries--;
+	}
+	/*trim the end*/
+	while (m->num_exentries &&
+	       within_module_init(m->extable[m->num_exentries-1].insn, m))
+		m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
+#endif /* !ARCH_HAS_SORT_EXTABLE */
 
 #ifndef ARCH_HAS_SEARCH_EXTABLE
 /*
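trim_init_extable() relies on the exception table being sorted by address, so entries that point into the module's init area can only sit at the head or the tail of the table. A small userspace sketch of the same head/tail trimming over a sorted array (the entry type, the threshold, and all names are hypothetical):

#include <stdio.h>

/* A sorted "exception table": instruction addresses in ascending order. */
struct entry { unsigned long insn; };

/* Stand-in for within_module_init(): here, anything below 0x1000. */
static int in_init_area(unsigned long insn)
{
	return insn < 0x1000;
}

static void trim_init_entries(struct entry **table, unsigned int *num)
{
	/* Trim matching entries from the beginning... */
	while (*num && in_init_area((*table)[0].insn)) {
		(*table)++;
		(*num)--;
	}
	/* ...and from the end; in a sorted table they can appear nowhere else. */
	while (*num && in_init_area((*table)[*num - 1].insn))
		(*num)--;
}

int main(void)
{
	struct entry sorted[] = { {0x10}, {0x20}, {0x2000}, {0x3000} };
	struct entry *p = sorted;
	unsigned int n = 4;

	trim_init_entries(&p, &n);
	printf("%u entries left, first insn 0x%lx\n", n, p[0].insn);	/* 2, 0x2000 */
	return 0;
}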
diff --git a/lib/gcd.c b/lib/gcd.c
new file mode 100644
index 000000000000..f879033d9822
--- /dev/null
+++ b/lib/gcd.c
@@ -0,0 +1,18 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Greatest common divisor */
6unsigned long gcd(unsigned long a, unsigned long b)
7{
8 unsigned long r;
9
10 if (a < b)
11 swap(a, b);
12 while ((r = a % b) != 0) {
13 a = b;
14 b = r;
15 }
16 return b;
17}
18EXPORT_SYMBOL_GPL(gcd);
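The new gcd() is Euclid's algorithm by repeated remainder; as written it expects the smaller argument to be non-zero. A quick userspace check of the same routine:

#include <assert.h>
#include <stdio.h>

/* Euclid's algorithm by repeated remainder, mirroring lib/gcd.c. */
static unsigned long my_gcd(unsigned long a, unsigned long b)
{
	unsigned long r;

	if (a < b) {		/* ensure a >= b before taking remainders */
		unsigned long t = a;
		a = b;
		b = t;
	}
	while ((r = a % b) != 0) {
		a = b;
		b = r;
	}
	return b;
}

int main(void)
{
	assert(my_gcd(12, 18) == 6);
	assert(my_gcd(17, 5) == 1);
	assert(my_gcd(48000, 44100) == 300);
	printf("ok\n");
	return 0;
}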
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d58..eed2bdb865e7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
 	int bit, end_bit;
 
 
-	write_lock(&pool->lock);
 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d26..39af2560f765 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%16.16llx ", (unsigned long long)*(ptr8 + j));
+				"%s%16.16llx", j ? " " : "",
+				(unsigned long long)*(ptr8 + j));
 		ascii_column = 17 * ngroups + 2;
 		break;
 	}
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%8.8x ", *(ptr4 + j));
+				"%s%8.8x", j ? " " : "", *(ptr4 + j));
 		ascii_column = 9 * ngroups + 2;
 		break;
 	}
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%4.4x ", *(ptr2 + j));
+				"%s%4.4x", j ? " " : "", *(ptr2 + j));
 		ascii_column = 5 * ngroups + 2;
 		break;
 	}
 
 	default:
-		for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
-		     j++) {
+		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
 			ch = ptr[j];
 			linebuf[lx++] = hex_asc_hi(ch);
 			linebuf[lx++] = hex_asc_lo(ch);
 			linebuf[lx++] = ' ';
 		}
+		if (j)
+			lx--;
+
 		ascii_column = 3 * rowsize + 2;
 		break;
 	}
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
 		linebuf[lx++] = ' ';
-	for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
 		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
 				: '.';
 nil:
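The scnprintf() changes above print the separator only between groups ("%s%4.4x", j ? " " : "") instead of after every group, so the hex field no longer ends in a stray trailing space. A tiny userspace sketch of the same join idiom using snprintf (buffer size is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned short groups[] = { 0x1234, 0xabcd, 0x00ff };
	char linebuf[64];
	int lx = 0;
	size_t j;

	/* Print a separator only *between* groups, never after the last one. */
	for (j = 0; j < sizeof(groups) / sizeof(groups[0]); j++)
		lx += snprintf(linebuf + lx, sizeof(linebuf) - lx,
			       "%s%4.4x", j ? " " : "", groups[j]);

	printf("[%s]\n", linebuf);	/* [1234 abcd 00ff] -- no trailing space */
	return 0;
}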
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe4f7a0..b512b746d2af 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
 					    struct kobject *parent_kobj)
 {
 	struct kset *kset;
+	int retval;
 
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, name);
+	if (retval) {
+		kfree(kset);
+		return NULL;
+	}
 	kset->uevent_ops = uevent_ops;
 	kset->kobj.parent = parent_kobj;
 
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344ec..23abbd93cae1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/**
- * radix_tree_lookup_slot - lookup a slot in a radix tree
- * @root:	radix tree root
- * @index:	index key
- *
- * Returns: the slot corresponding to the position @index in the
- * radix tree @root. This is useful for update-if-exists operations.
- *
- * This function can be called under rcu_read_lock iff the slot is not
- * modified by radix_tree_replace_slot, otherwise it must be called
- * exclusive from other writers. Any dereference of the slot must be done
- * using radix_tree_deref_slot.
+/*
+ * is_slot == 1 : search for the slot.
+ * is_slot == 0 : search for the node.
  */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+static void *radix_tree_lookup_element(struct radix_tree_root *root,
+				unsigned long index, int is_slot)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 	if (!radix_tree_is_indirect_ptr(node)) {
 		if (index > 0)
 			return NULL;
-		return (void **)&root->rnode;
+		return is_slot ? (void *)&root->rnode : node;
 	}
 	node = radix_tree_indirect_to_ptr(node);
 
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 		height--;
 	} while (height > 0);
 
-	return (void **)slot;
+	return is_slot ? (void *)slot:node;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root:	radix tree root
+ * @index:	index key
+ *
+ * Returns: the slot corresponding to the position @index in the
+ * radix tree @root. This is useful for update-if-exists operations.
+ *
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+	return (void **)radix_tree_lookup_element(root, index, 1);
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-	unsigned int height, shift;
-	struct radix_tree_node *node, **slot;
-
-	node = rcu_dereference(root->rnode);
-	if (node == NULL)
-		return NULL;
-
-	if (!radix_tree_is_indirect_ptr(node)) {
-		if (index > 0)
-			return NULL;
-		return node;
-	}
-	node = radix_tree_indirect_to_ptr(node);
-
-	height = node->height;
-	if (index > radix_tree_maxindex(height))
-		return NULL;
-
-	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
-	do {
-		slot = (struct radix_tree_node **)
-			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-		node = rcu_dereference(*slot);
-		if (node == NULL)
-			return NULL;
-
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
-	} while (height > 0);
-
-	return node;
+	return radix_tree_lookup_element(root, index, 0);
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_next_hole);
 
+/**
+ * radix_tree_prev_hole - find the prev hole (not-present entry)
+ * @root:	tree root
+ * @index:	index key
+ * @max_scan:	maximum range to search
+ *
+ * Search backwards in the range [max(index-max_scan+1, 0), index]
+ * for the first hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'index - return >= max_scan'
+ * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *
+ * radix_tree_prev_hole may be called under rcu_read_lock. However, like
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 10, then subsequently a hole is created at index 5,
+ * radix_tree_prev_hole covering both indexes may return 5 if called under
+ * rcu_read_lock.
+ */
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+				   unsigned long index, unsigned long max_scan)
+{
+	unsigned long i;
+
+	for (i = 0; i < max_scan; i++) {
+		if (!radix_tree_lookup(root, index))
+			break;
+		index--;
+		if (index == LONG_MAX)
+			break;
+	}
+
+	return index;
+}
+EXPORT_SYMBOL(radix_tree_prev_hole);
+
 static unsigned int
 __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index)
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659e0bc1..e2aa3be29858 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		node = node->rb_right;
 		while ((left = node->rb_left) != NULL)
 			node = left;
+
+		if (rb_parent(old)) {
+			if (rb_parent(old)->rb_left == old)
+				rb_parent(old)->rb_left = node;
+			else
+				rb_parent(old)->rb_right = node;
+		} else
+			root->rb_node = node;
+
 		child = node->rb_right;
 		parent = rb_parent(node);
 		color = rb_color(node);
 
-		if (child)
-			rb_set_parent(child, parent);
 		if (parent == old) {
-			parent->rb_right = child;
 			parent = node;
-		} else
+		} else {
+			if (child)
+				rb_set_parent(child, parent);
 			parent->rb_left = child;
 
+			node->rb_right = old->rb_right;
+			rb_set_parent(old->rb_right, node);
+		}
+
 		node->rb_parent_color = old->rb_parent_color;
-		node->rb_right = old->rb_right;
 		node->rb_left = old->rb_left;
-
-		if (rb_parent(old))
-		{
-			if (rb_parent(old)->rb_left == old)
-				rb_parent(old)->rb_left = node;
-			else
-				rb_parent(old)->rb_right = node;
-		} else
-			root->rb_node = node;
-
 		rb_set_parent(old->rb_left, node);
-		if (old->rb_right)
-			rb_set_parent(old->rb_right, node);
+
 		goto color;
 	}
 