author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2009-09-14 00:16:56 -0400
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2009-09-14 00:16:56 -0400
commit    fc8e1ead9314cf0e0f1922e661428b93d3a50d88 (patch)
tree      f3cb97c4769b74f6627a59769f1ed5c92a13c58a /lib
parent    2bcaa6a4238094c5695d5b1943078388d82d3004 (diff)
parent    9de48cc300fb10f7d9faa978670becf5e352462a (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig               |   9
-rw-r--r--  lib/Kconfig.debug         |  53
-rw-r--r--  lib/Kconfig.kmemcheck     |  91
-rw-r--r--  lib/Makefile              |   9
-rw-r--r--  lib/atomic64.c            | 186
-rw-r--r--  lib/bitmap.c              |  12
-rw-r--r--  lib/checksum.c            | 201
-rw-r--r--  lib/cpumask.c             |  11
-rw-r--r--  lib/dec_and_lock.c        |   3
-rw-r--r--  lib/decompress_bunzip2.c  |  24
-rw-r--r--  lib/decompress_inflate.c  |  10
-rw-r--r--  lib/decompress_unlzma.c   |  23
-rw-r--r--  lib/dma-debug.c           | 589
-rw-r--r--  lib/dynamic_debug.c       |   2
-rw-r--r--  lib/extable.c             |  21
-rw-r--r--  lib/flex_array.c          | 268
-rw-r--r--  lib/gcd.c                 |  18
-rw-r--r--  lib/genalloc.c            |   1
-rw-r--r--  lib/hexdump.c             |  15
-rw-r--r--  lib/kobject.c             |   7
-rw-r--r--  lib/lmb.c                 |   2
-rw-r--r--  lib/radix-tree.c          | 110
-rw-r--r--  lib/rational.c            |  62
-rw-r--r--  lib/rbtree.c              |  34
-rw-r--r--  lib/scatterlist.c         |  16
-rw-r--r--  lib/swiotlb.c             | 119
-rw-r--r--  lib/vsprintf.c            |  56
27 files changed, 1669 insertions(+), 283 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 8ade0a7a91e0..bb1326d3839c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,9 @@ menu "Library routines"
10config BITREVERSE 10config BITREVERSE
11 tristate 11 tristate
12 12
13config RATIONAL
14 boolean
15
13config GENERIC_FIND_FIRST_BIT 16config GENERIC_FIND_FIRST_BIT
14 bool 17 bool
15 18
@@ -191,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
191config NLATTR 194config NLATTR
192 bool 195 bool
193 196
197#
198# Generic 64-bit atomic support is selected if needed
199#
200config GENERIC_ATOMIC64
201 bool
202
194endmenu 203endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6cdcf38f2da9..12327b2bb785 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
300 300
301config DEBUG_SLAB 301config DEBUG_SLAB
302 bool "Debug slab memory allocations" 302 bool "Debug slab memory allocations"
303 depends on DEBUG_KERNEL && SLAB 303 depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
304 help 304 help
305 Say Y here to have the kernel do limited verification on memory 305 Say Y here to have the kernel do limited verification on memory
306 allocation as well as poisoning memory on free to catch use of freed 306 allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
312 312
313config SLUB_DEBUG_ON 313config SLUB_DEBUG_ON
314 bool "SLUB debugging on by default" 314 bool "SLUB debugging on by default"
315 depends on SLUB && SLUB_DEBUG 315 depends on SLUB && SLUB_DEBUG && !KMEMCHECK
316 default n 316 default n
317 help 317 help
318 Boot with debugging on by default. SLUB boots by default with 318 Boot with debugging on by default. SLUB boots by default with
@@ -336,6 +336,51 @@ config SLUB_STATS
336 out which slabs are relevant to a particular load. 336 out which slabs are relevant to a particular load.
337 Try running: slabinfo -DA 337 Try running: slabinfo -DA
338 338
339config DEBUG_KMEMLEAK
340 bool "Kernel memory leak detector"
341 depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
342 !MEMORY_HOTPLUG
343 select DEBUG_FS if SYSFS
344 select STACKTRACE if STACKTRACE_SUPPORT
345 select KALLSYMS
346 help
347 Say Y here if you want to enable the memory leak
348 detector. The memory allocation/freeing is traced in a way
349 similar to Boehm's conservative garbage collector, the
350 difference being that the orphan objects are not freed but
351 only shown in /sys/kernel/debug/kmemleak. Enabling this
352 feature will introduce an overhead to memory
353 allocations. See Documentation/kmemleak.txt for more
354 details.
355
356 Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
357 of finding leaks due to the slab objects poisoning.
358
359 In order to access the kmemleak file, debugfs needs to be
360 mounted (usually at /sys/kernel/debug).
361
362config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
363 int "Maximum kmemleak early log entries"
364 depends on DEBUG_KMEMLEAK
365 range 200 2000
366 default 400
367 help
368 Kmemleak must track all the memory allocations to avoid
369 reporting false positives. Since memory may be allocated or
370 freed before kmemleak is initialised, an early log buffer is
371 used to store these actions. If kmemleak reports "early log
372 buffer exceeded", please increase this value.
373
374config DEBUG_KMEMLEAK_TEST
375 tristate "Simple test for the kernel memory leak detector"
376 depends on DEBUG_KMEMLEAK
377 help
378 Say Y or M here to build a test for the kernel memory leak
379 detector. This option enables a module that explicitly leaks
380 memory.
381
382 If unsure, say N.
383
339config DEBUG_PREEMPT 384config DEBUG_PREEMPT
340 bool "Debug preemptible kernel" 385 bool "Debug preemptible kernel"
341 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) 386 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
@@ -440,7 +485,7 @@ config LOCKDEP
440 bool 485 bool
441 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 486 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
442 select STACKTRACE 487 select STACKTRACE
443 select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390 488 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
444 select KALLSYMS 489 select KALLSYMS
445 select KALLSYMS_ALL 490 select KALLSYMS_ALL
446 491
@@ -964,3 +1009,5 @@ config DMA_API_DEBUG
964source "samples/Kconfig" 1009source "samples/Kconfig"
965 1010
966source "lib/Kconfig.kgdb" 1011source "lib/Kconfig.kgdb"
1012
1013source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 000000000000..603c81b66549
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
1config HAVE_ARCH_KMEMCHECK
2 bool
3
4menuconfig KMEMCHECK
5 bool "kmemcheck: trap use of uninitialized memory"
6 depends on DEBUG_KERNEL
7 depends on !X86_USE_3DNOW
8 depends on SLUB || SLAB
9 depends on !CC_OPTIMIZE_FOR_SIZE
10 depends on !FUNCTION_TRACER
11 select FRAME_POINTER
12 select STACKTRACE
13 default n
14 help
15 This option enables tracing of dynamically allocated kernel memory
16 to see if memory is used before it has been given an initial value.
17 Be aware that this requires half of your memory for bookkeeping and
18 will insert extra code at *every* read and write to tracked memory
 19 thus slowing down the kernel code (but user code is unaffected).
20
21 The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
22 or enable kmemcheck at boot-time. If the kernel is started with
23 kmemcheck=0, the large memory and CPU overhead is not incurred.
24
25choice
26 prompt "kmemcheck: default mode at boot"
27 depends on KMEMCHECK
28 default KMEMCHECK_ONESHOT_BY_DEFAULT
29 help
30 This option controls the default behaviour of kmemcheck when the
31 kernel boots and no kmemcheck= parameter is given.
32
33config KMEMCHECK_DISABLED_BY_DEFAULT
34 bool "disabled"
35 depends on KMEMCHECK
36
37config KMEMCHECK_ENABLED_BY_DEFAULT
38 bool "enabled"
39 depends on KMEMCHECK
40
41config KMEMCHECK_ONESHOT_BY_DEFAULT
42 bool "one-shot"
43 depends on KMEMCHECK
44 help
45 In one-shot mode, only the first error detected is reported before
46 kmemcheck is disabled.
47
48endchoice
49
50config KMEMCHECK_QUEUE_SIZE
51 int "kmemcheck: error queue size"
52 depends on KMEMCHECK
53 default 64
54 help
55 Select the maximum number of errors to store in the queue. Since
56 errors can occur virtually anywhere and in any context, we need a
57 temporary storage area which is guarantueed not to generate any
58 other faults. The queue will be emptied as soon as a tasklet may
59 be scheduled. If the queue is full, new error reports will be
60 lost.
61
62config KMEMCHECK_SHADOW_COPY_SHIFT
63 int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
64 depends on KMEMCHECK
65 range 2 8
66 default 5
67 help
68 Select the number of shadow bytes to save along with each entry of
69 the queue. These bytes indicate what parts of an allocation are
70 initialized, uninitialized, etc. and will be displayed when an
71 error is detected to help the debugging of a particular problem.
72
73config KMEMCHECK_PARTIAL_OK
74 bool "kmemcheck: allow partially uninitialized memory"
75 depends on KMEMCHECK
76 default y
77 help
78 This option works around certain GCC optimizations that produce
79 32-bit reads from 16-bit variables where the upper 16 bits are
80 thrown away afterwards. This may of course also hide some real
81 bugs.
82
83config KMEMCHECK_BITOPS_OK
84 bool "kmemcheck: allow bit-field manipulation"
85 depends on KMEMCHECK
86 default n
87 help
88 This option silences warnings that would be generated for bit-field
89 accesses where not all the bits are initialized at the same time.
90 This may also hide some real bugs.
91
diff --git a/lib/Makefile b/lib/Makefile
index 33a40e40e3ee..2e78277eff9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o 15 is_single_threaded.o plist.o decompress.o flex_array.o
16 16
17lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
18lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
21 21
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o 24 string_helpers.o gcd.o
25 25
26ifeq ($(CONFIG_DEBUG_KOBJECT),y) 26ifeq ($(CONFIG_DEBUG_KOBJECT),y)
27CFLAGS_kobject.o += -DDEBUG 27CFLAGS_kobject.o += -DDEBUG
@@ -50,6 +50,7 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
50endif 50endif
51 51
52obj-$(CONFIG_BITREVERSE) += bitrev.o 52obj-$(CONFIG_BITREVERSE) += bitrev.o
53obj-$(CONFIG_RATIONAL) += rational.o
53obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o 54obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
54obj-$(CONFIG_CRC16) += crc16.o 55obj-$(CONFIG_CRC16) += crc16.o
55obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o 56obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -92,6 +93,10 @@ obj-$(CONFIG_NLATTR) += nlattr.o
92 93
93obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o 94obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
94 95
96obj-$(CONFIG_GENERIC_CSUM) += checksum.o
97
98obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
99
95hostprogs-y := gen_crc32table 100hostprogs-y := gen_crc32table
96clean-files := crc32table.h 101clean-files := crc32table.h
97 102
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000000..8bee16ec7524
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,186 @@
1/*
2 * Generic implementation of 64-bit atomics using spinlocks,
3 * useful on processors that don't have 64-bit atomic instructions.
4 *
5 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/types.h>
13#include <linux/cache.h>
14#include <linux/spinlock.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <asm/atomic.h>
18
19/*
20 * We use a hashed array of spinlocks to provide exclusive access
 21 * to each atomic64_t variable. Since this is expected to be used on
22 * systems with small numbers of CPUs (<= 4 or so), we use a
23 * relatively small array of 16 spinlocks to avoid wasting too much
24 * memory on the spinlock array.
25 */
26#define NR_LOCKS 16
27
28/*
29 * Ensure each lock is in a separate cacheline.
30 */
31static union {
32 spinlock_t lock;
33 char pad[L1_CACHE_BYTES];
34} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
35
36static inline spinlock_t *lock_addr(const atomic64_t *v)
37{
38 unsigned long addr = (unsigned long) v;
39
40 addr >>= L1_CACHE_SHIFT;
41 addr ^= (addr >> 8) ^ (addr >> 16);
42 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
43}
44
45long long atomic64_read(const atomic64_t *v)
46{
47 unsigned long flags;
48 spinlock_t *lock = lock_addr(v);
49 long long val;
50
51 spin_lock_irqsave(lock, flags);
52 val = v->counter;
53 spin_unlock_irqrestore(lock, flags);
54 return val;
55}
56EXPORT_SYMBOL(atomic64_read);
57
58void atomic64_set(atomic64_t *v, long long i)
59{
60 unsigned long flags;
61 spinlock_t *lock = lock_addr(v);
62
63 spin_lock_irqsave(lock, flags);
64 v->counter = i;
65 spin_unlock_irqrestore(lock, flags);
66}
67EXPORT_SYMBOL(atomic64_set);
68
69void atomic64_add(long long a, atomic64_t *v)
70{
71 unsigned long flags;
72 spinlock_t *lock = lock_addr(v);
73
74 spin_lock_irqsave(lock, flags);
75 v->counter += a;
76 spin_unlock_irqrestore(lock, flags);
77}
78EXPORT_SYMBOL(atomic64_add);
79
80long long atomic64_add_return(long long a, atomic64_t *v)
81{
82 unsigned long flags;
83 spinlock_t *lock = lock_addr(v);
84 long long val;
85
86 spin_lock_irqsave(lock, flags);
87 val = v->counter += a;
88 spin_unlock_irqrestore(lock, flags);
89 return val;
90}
91EXPORT_SYMBOL(atomic64_add_return);
92
93void atomic64_sub(long long a, atomic64_t *v)
94{
95 unsigned long flags;
96 spinlock_t *lock = lock_addr(v);
97
98 spin_lock_irqsave(lock, flags);
99 v->counter -= a;
100 spin_unlock_irqrestore(lock, flags);
101}
102EXPORT_SYMBOL(atomic64_sub);
103
104long long atomic64_sub_return(long long a, atomic64_t *v)
105{
106 unsigned long flags;
107 spinlock_t *lock = lock_addr(v);
108 long long val;
109
110 spin_lock_irqsave(lock, flags);
111 val = v->counter -= a;
112 spin_unlock_irqrestore(lock, flags);
113 return val;
114}
115EXPORT_SYMBOL(atomic64_sub_return);
116
117long long atomic64_dec_if_positive(atomic64_t *v)
118{
119 unsigned long flags;
120 spinlock_t *lock = lock_addr(v);
121 long long val;
122
123 spin_lock_irqsave(lock, flags);
124 val = v->counter - 1;
125 if (val >= 0)
126 v->counter = val;
127 spin_unlock_irqrestore(lock, flags);
128 return val;
129}
130EXPORT_SYMBOL(atomic64_dec_if_positive);
131
132long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
133{
134 unsigned long flags;
135 spinlock_t *lock = lock_addr(v);
136 long long val;
137
138 spin_lock_irqsave(lock, flags);
139 val = v->counter;
140 if (val == o)
141 v->counter = n;
142 spin_unlock_irqrestore(lock, flags);
143 return val;
144}
145EXPORT_SYMBOL(atomic64_cmpxchg);
146
147long long atomic64_xchg(atomic64_t *v, long long new)
148{
149 unsigned long flags;
150 spinlock_t *lock = lock_addr(v);
151 long long val;
152
153 spin_lock_irqsave(lock, flags);
154 val = v->counter;
155 v->counter = new;
156 spin_unlock_irqrestore(lock, flags);
157 return val;
158}
159EXPORT_SYMBOL(atomic64_xchg);
160
161int atomic64_add_unless(atomic64_t *v, long long a, long long u)
162{
163 unsigned long flags;
164 spinlock_t *lock = lock_addr(v);
165 int ret = 1;
166
167 spin_lock_irqsave(lock, flags);
168 if (v->counter != u) {
169 v->counter += a;
170 ret = 0;
171 }
172 spin_unlock_irqrestore(lock, flags);
173 return ret;
174}
175EXPORT_SYMBOL(atomic64_add_unless);
176
177static int init_atomic64_lock(void)
178{
179 int i;
180
181 for (i = 0; i < NR_LOCKS; ++i)
182 spin_lock_init(&atomic64_lock[i].lock);
183 return 0;
184}
185
186pure_initcall(init_atomic64_lock);
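
The new file rests on one simple idea: pick a lock by hashing the address of the atomic64_t, so unrelated variables rarely contend and no per-variable lock storage is needed. Below is a minimal userspace sketch of the same hashed-lock technique, with pthread mutexes standing in for the kernel spinlocks; all names are illustrative, not kernel API.

	/*
	 * Userspace sketch: make 64-bit updates atomic by hashing the
	 * variable's address into a small, fixed array of locks.
	 */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NR_LOCKS 16

	typedef struct { long long counter; } my_atomic64_t;

	static pthread_mutex_t locks[NR_LOCKS];

	static pthread_mutex_t *lock_for(const my_atomic64_t *v)
	{
		uintptr_t addr = (uintptr_t)v;

		addr >>= 6;				/* drop low (cache-line) bits */
		addr ^= (addr >> 8) ^ (addr >> 16);	/* mix in higher bits */
		return &locks[addr & (NR_LOCKS - 1)];
	}

	static long long my_atomic64_add_return(long long a, my_atomic64_t *v)
	{
		pthread_mutex_t *lock = lock_for(v);
		long long val;

		pthread_mutex_lock(lock);
		val = v->counter += a;
		pthread_mutex_unlock(lock);
		return val;
	}

	int main(void)
	{
		my_atomic64_t v = { 0 };
		int i;

		for (i = 0; i < NR_LOCKS; i++)	/* like init_atomic64_lock() */
			pthread_mutex_init(&locks[i], NULL);

		printf("%lld\n", my_atomic64_add_return(5, &v));	/* 5 */
		printf("%lld\n", my_atomic64_add_return(-2, &v));	/* 3 */
		return 0;
	}
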
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 35a1f7ff4149..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
179} 179}
180EXPORT_SYMBOL(__bitmap_shift_left); 180EXPORT_SYMBOL(__bitmap_shift_left);
181 181
182void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, 182int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
183 const unsigned long *bitmap2, int bits) 183 const unsigned long *bitmap2, int bits)
184{ 184{
185 int k; 185 int k;
186 int nr = BITS_TO_LONGS(bits); 186 int nr = BITS_TO_LONGS(bits);
187 unsigned long result = 0;
187 188
188 for (k = 0; k < nr; k++) 189 for (k = 0; k < nr; k++)
189 dst[k] = bitmap1[k] & bitmap2[k]; 190 result |= (dst[k] = bitmap1[k] & bitmap2[k]);
191 return result != 0;
190} 192}
191EXPORT_SYMBOL(__bitmap_and); 193EXPORT_SYMBOL(__bitmap_and);
192 194
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
212} 214}
213EXPORT_SYMBOL(__bitmap_xor); 215EXPORT_SYMBOL(__bitmap_xor);
214 216
215void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, 217int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
216 const unsigned long *bitmap2, int bits) 218 const unsigned long *bitmap2, int bits)
217{ 219{
218 int k; 220 int k;
219 int nr = BITS_TO_LONGS(bits); 221 int nr = BITS_TO_LONGS(bits);
222 unsigned long result = 0;
220 223
221 for (k = 0; k < nr; k++) 224 for (k = 0; k < nr; k++)
222 dst[k] = bitmap1[k] & ~bitmap2[k]; 225 result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
226 return result != 0;
223} 227}
224EXPORT_SYMBOL(__bitmap_andnot); 228EXPORT_SYMBOL(__bitmap_andnot);
225 229
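
The change to __bitmap_and() and __bitmap_andnot() lets a caller learn, in the same pass that computes the result, whether the destination bitmap ended up empty, presumably so wrappers such as the cpumask helpers can avoid a second scan. A small standalone sketch of the pattern (illustrative userspace code, not the kernel helpers themselves):

	#include <stdbool.h>
	#include <stdio.h>

	#define WORDS 4

	/* dst = a & b, word by word, OR-accumulating so emptiness is known */
	static bool bitmap_and(unsigned long *dst, const unsigned long *a,
			       const unsigned long *b, int nwords)
	{
		unsigned long result = 0;
		int k;

		for (k = 0; k < nwords; k++)
			result |= (dst[k] = a[k] & b[k]);

		return result != 0;	/* true iff the intersection is non-empty */
	}

	int main(void)
	{
		unsigned long a[WORDS] = { 0xff00, 0, 0, 0 };
		unsigned long b[WORDS] = { 0x00ff, 0, 0, 0 };
		unsigned long dst[WORDS];

		/* 0xff00 & 0x00ff == 0, so the intersection is empty */
		printf("non-empty: %d\n", bitmap_and(dst, a, b, WORDS));
		return 0;
	}
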
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 000000000000..b2e2fd468461
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,201 @@
1/*
2 *
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * IP/TCP/UDP checksumming routines
8 *
9 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11 * Tom May, <ftom@netcom.com>
12 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
13 * Lots of code moved from tcp.c and ip.c; see those files
14 * for more names.
15 *
16 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
17 * Fixed some nasty bugs, causing some horrible crashes.
18 * A: At some points, the sum (%0) was used as
19 * length-counter instead of the length counter
20 * (%1). Thanks to Roman Hodek for pointing this out.
21 * B: GCC seems to mess up if one uses too many
22 * data-registers to hold input values and one tries to
23 * specify d0 and d1 as scratch registers. Letting gcc
24 * choose these registers itself solves the problem.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version.
30 */
31
32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
33 kills, so most of the assembly has to go. */
34
35#include <linux/module.h>
36#include <net/checksum.h>
37
38#include <asm/byteorder.h>
39
40static inline unsigned short from32to16(unsigned long x)
41{
42 /* add up 16-bit and 16-bit for 16+c bit */
43 x = (x & 0xffff) + (x >> 16);
44 /* add up carry.. */
45 x = (x & 0xffff) + (x >> 16);
46 return x;
47}
48
49static unsigned int do_csum(const unsigned char *buff, int len)
50{
51 int odd, count;
52 unsigned long result = 0;
53
54 if (len <= 0)
55 goto out;
56 odd = 1 & (unsigned long) buff;
57 if (odd) {
58#ifdef __LITTLE_ENDIAN
59 result = *buff;
60#else
61 result += (*buff << 8);
62#endif
63 len--;
64 buff++;
65 }
66 count = len >> 1; /* nr of 16-bit words.. */
67 if (count) {
68 if (2 & (unsigned long) buff) {
69 result += *(unsigned short *) buff;
70 count--;
71 len -= 2;
72 buff += 2;
73 }
74 count >>= 1; /* nr of 32-bit words.. */
75 if (count) {
76 unsigned long carry = 0;
77 do {
78 unsigned long w = *(unsigned int *) buff;
79 count--;
80 buff += 4;
81 result += carry;
82 result += w;
83 carry = (w > result);
84 } while (count);
85 result += carry;
86 result = (result & 0xffff) + (result >> 16);
87 }
88 if (len & 2) {
89 result += *(unsigned short *) buff;
90 buff += 2;
91 }
92 }
93 if (len & 1)
94#ifdef __LITTLE_ENDIAN
95 result += *buff;
96#else
97 result += (*buff << 8);
98#endif
99 result = from32to16(result);
100 if (odd)
101 result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
102out:
103 return result;
104}
105
106/*
107 * This is a version of ip_compute_csum() optimized for IP headers,
108 * which always checksum on 4 octet boundaries.
109 */
110__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
111{
112 return (__force __sum16)~do_csum(iph, ihl*4);
113}
114EXPORT_SYMBOL(ip_fast_csum);
115
116/*
117 * computes the checksum of a memory block at buff, length len,
118 * and adds in "sum" (32-bit)
119 *
120 * returns a 32-bit number suitable for feeding into itself
121 * or csum_tcpudp_magic
122 *
123 * this function must be called with even lengths, except
124 * for the last fragment, which may be odd
125 *
126 * it's best to have buff aligned on a 32-bit boundary
127 */
128__wsum csum_partial(const void *buff, int len, __wsum wsum)
129{
130 unsigned int sum = (__force unsigned int)wsum;
131 unsigned int result = do_csum(buff, len);
132
133 /* add in old sum, and carry.. */
134 result += sum;
135 if (sum > result)
136 result += 1;
137 return (__force __wsum)result;
138}
139EXPORT_SYMBOL(csum_partial);
140
141/*
142 * this routine is used for miscellaneous IP-like checksums, mainly
143 * in icmp.c
144 */
145__sum16 ip_compute_csum(const void *buff, int len)
146{
147 return (__force __sum16)~do_csum(buff, len);
148}
149EXPORT_SYMBOL(ip_compute_csum);
150
151/*
152 * copy from fs while checksumming, otherwise like csum_partial
153 */
154__wsum
155csum_partial_copy_from_user(const void __user *src, void *dst, int len,
156 __wsum sum, int *csum_err)
157{
158 int missing;
159
160 missing = __copy_from_user(dst, src, len);
161 if (missing) {
162 memset(dst + len - missing, 0, missing);
163 *csum_err = -EFAULT;
164 } else
165 *csum_err = 0;
166
167 return csum_partial(dst, len, sum);
168}
169EXPORT_SYMBOL(csum_partial_copy_from_user);
170
171/*
172 * copy from ds while checksumming, otherwise like csum_partial
173 */
174__wsum
175csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
176{
177 memcpy(dst, src, len);
178 return csum_partial(dst, len, sum);
179}
180EXPORT_SYMBOL(csum_partial_copy);
181
182#ifndef csum_tcpudp_nofold
183__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
184 unsigned short len,
185 unsigned short proto,
186 __wsum sum)
187{
188 unsigned long long s = (__force u32)sum;
189
190 s += (__force u32)saddr;
191 s += (__force u32)daddr;
192#ifdef __BIG_ENDIAN
193 s += proto + len;
194#else
195 s += (proto + len) << 8;
196#endif
197 s += (s >> 32);
198 return (__force __wsum)s;
199}
200EXPORT_SYMBOL(csum_tcpudp_nofold);
201#endif
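
lib/checksum.c supplies a portable version of the one's-complement Internet checksum for architectures that select GENERIC_CSUM. A tiny userspace illustration of the core idea follows: sum the data as 16-bit words, fold the carries back in as from32to16() does, then complement. It assumes an even-length, aligned buffer for brevity; the kernel code above also handles odd lengths and misalignment.

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t inet_csum(const uint16_t *buf, int nwords)
	{
		uint32_t sum = 0;

		while (nwords--)
			sum += *buf++;

		/* fold the 32-bit sum to 16 bits, adding the carry twice */
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);

		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* two words whose sum overflows 16 bits, forcing a carry fold */
		uint16_t data[] = { 0xf000, 0x2000 };

		/* 0xf000 + 0x2000 = 0x11000 -> fold -> 0x1001 -> ~ -> 0xeffe */
		printf("0x%04x\n", inet_csum(data, 2));
		return 0;
	}
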
diff --git a/lib/cpumask.c b/lib/cpumask.c
index eb23aaa0c7b8..7bb4142a502f 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
92 */ 92 */
93bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) 93bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
94{ 94{
95 if (likely(slab_is_available())) 95 *mask = kmalloc_node(cpumask_size(), flags, node);
96 *mask = kmalloc_node(cpumask_size(), flags, node); 96
97 else {
98#ifdef CONFIG_DEBUG_PER_CPU_MAPS
99 printk(KERN_ERR
100 "=> alloc_cpumask_var: kmalloc not available!\n");
101#endif
102 *mask = NULL;
103 }
104#ifdef CONFIG_DEBUG_PER_CPU_MAPS 97#ifdef CONFIG_DEBUG_PER_CPU_MAPS
105 if (!*mask) { 98 if (!*mask) {
106 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n"); 99 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c31455541..e73822aa6e9a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
19 */ 19 */
20int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) 20int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
21{ 21{
22#ifdef CONFIG_SMP
23 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ 22 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
24 if (atomic_add_unless(atomic, -1, 1)) 23 if (atomic_add_unless(atomic, -1, 1))
25 return 0; 24 return 0;
26#endif 25
27 /* Otherwise do it the slow way */ 26 /* Otherwise do it the slow way */
28 spin_lock(lock); 27 spin_lock(lock);
29 if (atomic_dec_and_test(atomic)) 28 if (atomic_dec_and_test(atomic))
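
With the CONFIG_SMP guard gone, the lock-free fast path is always tried first and the spinlock is taken only when the caller is dropping what may be the final reference. A userspace sketch of that "decrement unless it is the last reference, otherwise lock and re-check" pattern, using C11 atomics and a pthread mutex as stand-ins for the kernel primitives (names illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* lock-free fast path: decrement unless the counter is exactly 1 */
	static bool dec_unless_one(atomic_int *cnt)
	{
		int old = atomic_load(cnt);

		while (old != 1) {
			if (atomic_compare_exchange_weak(cnt, &old, old - 1))
				return true;	/* decremented, not the last ref */
		}
		return false;
	}

	/* returns true (with lock held) only when the last reference was dropped */
	static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
	{
		if (dec_unless_one(cnt))
			return false;

		pthread_mutex_lock(lock);
		if (atomic_fetch_sub(cnt, 1) == 1)
			return true;
		pthread_mutex_unlock(lock);
		return false;
	}

	int main(void)
	{
		atomic_int refs = 2;
		pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

		printf("%d\n", dec_and_lock(&refs, &lock));	/* 0: fast path */
		if (dec_and_lock(&refs, &lock)) {		/* 1: last ref */
			printf("last ref dropped, lock held\n");
			pthread_mutex_unlock(&lock);
		}
		return 0;
	}
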
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 708e2a86d87b..600f473a5610 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -45,12 +45,14 @@
45*/ 45*/
46 46
47 47
48#ifndef STATIC 48#ifdef STATIC
49#define PREBOOT
50#else
49#include <linux/decompress/bunzip2.h> 51#include <linux/decompress/bunzip2.h>
50#endif /* !STATIC */ 52#include <linux/slab.h>
53#endif /* STATIC */
51 54
52#include <linux/decompress/mm.h> 55#include <linux/decompress/mm.h>
53#include <linux/slab.h>
54 56
55#ifndef INT_MAX 57#ifndef INT_MAX
56#define INT_MAX 0x7fffffff 58#define INT_MAX 0x7fffffff
@@ -681,9 +683,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
681 set_error_fn(error_fn); 683 set_error_fn(error_fn);
682 if (flush) 684 if (flush)
683 outbuf = malloc(BZIP2_IOBUF_SIZE); 685 outbuf = malloc(BZIP2_IOBUF_SIZE);
684 else 686
685 len -= 4; /* Uncompressed size hack active in pre-boot
686 environment */
687 if (!outbuf) { 687 if (!outbuf) {
688 error("Could not allocate output bufer"); 688 error("Could not allocate output bufer");
689 return -1; 689 return -1;
@@ -733,4 +733,14 @@ exit_0:
733 return i; 733 return i;
734} 734}
735 735
736#define decompress bunzip2 736#ifdef PREBOOT
737STATIC int INIT decompress(unsigned char *buf, int len,
738 int(*fill)(void*, unsigned int),
739 int(*flush)(void*, unsigned int),
740 unsigned char *outbuf,
741 int *pos,
742 void(*error_fn)(char *x))
743{
744 return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
745}
746#endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index e36b296fc9f8..68dfce59c1b8 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,13 +19,13 @@
19#include "zlib_inflate/inflate.h" 19#include "zlib_inflate/inflate.h"
20 20
21#include "zlib_inflate/infutil.h" 21#include "zlib_inflate/infutil.h"
22#include <linux/slab.h>
22 23
23#endif /* STATIC */ 24#endif /* STATIC */
24 25
25#include <linux/decompress/mm.h> 26#include <linux/decompress/mm.h>
26#include <linux/slab.h>
27 27
28#define INBUF_LEN (16*1024) 28#define GZIP_IOBUF_SIZE (16*1024)
29 29
30/* Included from initramfs et al code */ 30/* Included from initramfs et al code */
31STATIC int INIT gunzip(unsigned char *buf, int len, 31STATIC int INIT gunzip(unsigned char *buf, int len,
@@ -55,7 +55,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
55 if (buf) 55 if (buf)
56 zbuf = buf; 56 zbuf = buf;
57 else { 57 else {
58 zbuf = malloc(INBUF_LEN); 58 zbuf = malloc(GZIP_IOBUF_SIZE);
59 len = 0; 59 len = 0;
60 } 60 }
61 if (!zbuf) { 61 if (!zbuf) {
@@ -77,7 +77,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
77 } 77 }
78 78
79 if (len == 0) 79 if (len == 0)
80 len = fill(zbuf, INBUF_LEN); 80 len = fill(zbuf, GZIP_IOBUF_SIZE);
81 81
82 /* verify the gzip header */ 82 /* verify the gzip header */
83 if (len < 10 || 83 if (len < 10 ||
@@ -113,7 +113,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
113 while (rc == Z_OK) { 113 while (rc == Z_OK) {
114 if (strm->avail_in == 0) { 114 if (strm->avail_in == 0) {
115 /* TODO: handle case where both pos and fill are set */ 115 /* TODO: handle case where both pos and fill are set */
116 len = fill(zbuf, INBUF_LEN); 116 len = fill(zbuf, GZIP_IOBUF_SIZE);
117 if (len < 0) { 117 if (len < 0) {
118 rc = -1; 118 rc = -1;
119 error("read error"); 119 error("read error");
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32123a1340e6..0b954e04bd30 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -29,12 +29,14 @@
29 *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 29 *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */ 30 */
31 31
32#ifndef STATIC 32#ifdef STATIC
33#define PREBOOT
34#else
33#include <linux/decompress/unlzma.h> 35#include <linux/decompress/unlzma.h>
36#include <linux/slab.h>
34#endif /* STATIC */ 37#endif /* STATIC */
35 38
36#include <linux/decompress/mm.h> 39#include <linux/decompress/mm.h>
37#include <linux/slab.h>
38 40
39#define MIN(a, b) (((a) < (b)) ? (a) : (b)) 41#define MIN(a, b) (((a) < (b)) ? (a) : (b))
40 42
@@ -543,9 +545,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
543 int ret = -1; 545 int ret = -1;
544 546
545 set_error_fn(error_fn); 547 set_error_fn(error_fn);
546 if (!flush) 548
547 in_len -= 4; /* Uncompressed size hack active in pre-boot
548 environment */
549 if (buf) 549 if (buf)
550 inbuf = buf; 550 inbuf = buf;
551 else 551 else
@@ -645,4 +645,15 @@ exit_0:
645 return ret; 645 return ret;
646} 646}
647 647
648#define decompress unlzma 648#ifdef PREBOOT
649STATIC int INIT decompress(unsigned char *buf, int in_len,
650 int(*fill)(void*, unsigned int),
651 int(*flush)(void*, unsigned int),
652 unsigned char *output,
653 int *posp,
654 void(*error_fn)(char *x)
655 )
656{
657 return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
658}
659#endif
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 69da09a085a1..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
23#include <linux/dma-debug.h> 23#include <linux/dma-debug.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/debugfs.h> 25#include <linux/debugfs.h>
26#include <linux/uaccess.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/types.h> 28#include <linux/types.h>
28#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/ctype.h>
29#include <linux/list.h> 31#include <linux/list.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
31 33
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
85 87
86static u32 num_free_entries; 88static u32 num_free_entries;
87static u32 min_free_entries; 89static u32 min_free_entries;
90static u32 nr_total_entries;
88 91
89/* number of preallocated entries requested by kernel cmdline */ 92/* number of preallocated entries requested by kernel cmdline */
90static u32 req_entries; 93static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent __read_mostly;
97static struct dentry *show_num_errors_dent __read_mostly; 100static struct dentry *show_num_errors_dent __read_mostly;
98static struct dentry *num_free_entries_dent __read_mostly; 101static struct dentry *num_free_entries_dent __read_mostly;
99static struct dentry *min_free_entries_dent __read_mostly; 102static struct dentry *min_free_entries_dent __read_mostly;
103static struct dentry *filter_dent __read_mostly;
104
105/* per-driver filter related state */
106
107#define NAME_MAX_LEN 64
108
109static char current_driver_name[NAME_MAX_LEN] __read_mostly;
110static struct device_driver *current_driver __read_mostly;
111
112static DEFINE_RWLOCK(driver_name_lock);
100 113
101static const char *type2name[4] = { "single", "page", 114static const char *type2name[4] = { "single", "page",
102 "scather-gather", "coherent" }; 115 "scather-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
104static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", 117static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
105 "DMA_FROM_DEVICE", "DMA_NONE" }; 118 "DMA_FROM_DEVICE", "DMA_NONE" };
106 119
120/* little merge helper - remove it after the merge window */
121#ifndef BUS_NOTIFY_UNBOUND_DRIVER
122#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
123#endif
124
107/* 125/*
108 * The access to some variables in this macro is racy. We can't use atomic_t 126 * The access to some variables in this macro is racy. We can't use atomic_t
109 * here because all these variables are exported to debugfs. Some of them even 127 * here because all these variables are exported to debugfs. Some of them even
@@ -121,22 +139,65 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
121{ 139{
122#ifdef CONFIG_STACKTRACE 140#ifdef CONFIG_STACKTRACE
123 if (entry) { 141 if (entry) {
124 printk(KERN_WARNING "Mapped at:\n"); 142 pr_warning("Mapped at:\n");
125 print_stack_trace(&entry->stacktrace, 0); 143 print_stack_trace(&entry->stacktrace, 0);
126 } 144 }
127#endif 145#endif
128} 146}
129 147
130#define err_printk(dev, entry, format, arg...) do { \ 148static bool driver_filter(struct device *dev)
131 error_count += 1; \ 149{
132 if (show_all_errors || show_num_errors > 0) { \ 150 struct device_driver *drv;
133 WARN(1, "%s %s: " format, \ 151 unsigned long flags;
134 dev_driver_string(dev), \ 152 bool ret;
135 dev_name(dev) , ## arg); \ 153
136 dump_entry_trace(entry); \ 154 /* driver filter off */
137 } \ 155 if (likely(!current_driver_name[0]))
138 if (!show_all_errors && show_num_errors > 0) \ 156 return true;
139 show_num_errors -= 1; \ 157
158 /* driver filter on and initialized */
159 if (current_driver && dev && dev->driver == current_driver)
160 return true;
161
162 /* driver filter on, but we can't filter on a NULL device... */
163 if (!dev)
164 return false;
165
166 if (current_driver || !current_driver_name[0])
167 return false;
168
169 /* driver filter on but not yet initialized */
170 drv = get_driver(dev->driver);
171 if (!drv)
172 return false;
173
174 /* lock to protect against change of current_driver_name */
175 read_lock_irqsave(&driver_name_lock, flags);
176
177 ret = false;
178 if (drv->name &&
179 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
180 current_driver = drv;
181 ret = true;
182 }
183
184 read_unlock_irqrestore(&driver_name_lock, flags);
185 put_driver(drv);
186
187 return ret;
188}
189
190#define err_printk(dev, entry, format, arg...) do { \
191 error_count += 1; \
192 if (driver_filter(dev) && \
193 (show_all_errors || show_num_errors > 0)) { \
194 WARN(1, "%s %s: " format, \
195 dev ? dev_driver_string(dev) : "NULL", \
196 dev ? dev_name(dev) : "NULL", ## arg); \
197 dump_entry_trace(entry); \
198 } \
199 if (!show_all_errors && show_num_errors > 0) \
200 show_num_errors -= 1; \
140 } while (0); 201 } while (0);
141 202
142/* 203/*
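
Taken together, driver_filter() and the reworked err_printk() implement a simple reporting policy: every error is counted, but a report is printed only if it passes the optional driver-name filter and the budget of visible reports has not been used up. A simplified userspace sketch of that policy (illustrative names, not the kernel macro):

	#include <stdio.h>
	#include <string.h>

	static unsigned error_count;
	static unsigned show_num_errors = 2;	/* print at most two reports */
	static char current_filter[64];		/* empty string: filter off */

	static void report_error(const char *driver, const char *msg)
	{
		error_count++;

		if (current_filter[0] && strcmp(current_filter, driver) != 0)
			return;			/* filtered out, still counted */
		if (show_num_errors == 0)
			return;			/* report budget exhausted */

		show_num_errors--;
		fprintf(stderr, "%s: %s\n", driver, msg);
	}

	int main(void)
	{
		strcpy(current_filter, "e1000e");
		report_error("e1000e", "driver frees DMA memory it never allocated");
		report_error("ahci",   "suppressed by the driver filter");
		report_error("e1000e", "second and last visible report");
		report_error("e1000e", "counted but no longer printed");
		printf("total errors: %u\n", error_count);
		return 0;
	}
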
@@ -185,15 +246,51 @@ static void put_hash_bucket(struct hash_bucket *bucket,
185static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, 246static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
186 struct dma_debug_entry *ref) 247 struct dma_debug_entry *ref)
187{ 248{
188 struct dma_debug_entry *entry; 249 struct dma_debug_entry *entry, *ret = NULL;
250 int matches = 0, match_lvl, last_lvl = 0;
189 251
190 list_for_each_entry(entry, &bucket->list, list) { 252 list_for_each_entry(entry, &bucket->list, list) {
191 if ((entry->dev_addr == ref->dev_addr) && 253 if ((entry->dev_addr != ref->dev_addr) ||
192 (entry->dev == ref->dev)) 254 (entry->dev != ref->dev))
255 continue;
256
257 /*
258 * Some drivers map the same physical address multiple
259 * times. Without a hardware IOMMU this results in the
260 * same device addresses being put into the dma-debug
261 * hash multiple times too. This can result in false
 262 * positives being reported. Therefore we implement a
263 * best-fit algorithm here which returns the entry from
264 * the hash which fits best to the reference value
265 * instead of the first-fit.
266 */
267 matches += 1;
268 match_lvl = 0;
269 entry->size == ref->size ? ++match_lvl : 0;
270 entry->type == ref->type ? ++match_lvl : 0;
271 entry->direction == ref->direction ? ++match_lvl : 0;
272 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
273
274 if (match_lvl == 4) {
275 /* perfect-fit - return the result */
193 return entry; 276 return entry;
277 } else if (match_lvl > last_lvl) {
278 /*
 279 * We found an entry that fits better than the
280 * previous one
281 */
282 last_lvl = match_lvl;
283 ret = entry;
284 }
194 } 285 }
195 286
196 return NULL; 287 /*
288 * If we have multiple matches but no perfect-fit, just return
289 * NULL.
290 */
291 ret = (matches == 1) ? ret : NULL;
292
293 return ret;
197} 294}
198 295
199/* 296/*
@@ -257,6 +354,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
257 put_hash_bucket(bucket, &flags); 354 put_hash_bucket(bucket, &flags);
258} 355}
259 356
357static struct dma_debug_entry *__dma_entry_alloc(void)
358{
359 struct dma_debug_entry *entry;
360
361 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
362 list_del(&entry->list);
363 memset(entry, 0, sizeof(*entry));
364
365 num_free_entries -= 1;
366 if (num_free_entries < min_free_entries)
367 min_free_entries = num_free_entries;
368
369 return entry;
370}
371
260/* struct dma_entry allocator 372/* struct dma_entry allocator
261 * 373 *
262 * The next two functions implement the allocator for 374 * The next two functions implement the allocator for
@@ -270,15 +382,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
270 spin_lock_irqsave(&free_entries_lock, flags); 382 spin_lock_irqsave(&free_entries_lock, flags);
271 383
272 if (list_empty(&free_entries)) { 384 if (list_empty(&free_entries)) {
273 printk(KERN_ERR "DMA-API: debugging out of memory " 385 pr_err("DMA-API: debugging out of memory - disabling\n");
274 "- disabling\n");
275 global_disable = true; 386 global_disable = true;
276 goto out; 387 goto out;
277 } 388 }
278 389
279 entry = list_entry(free_entries.next, struct dma_debug_entry, list); 390 entry = __dma_entry_alloc();
280 list_del(&entry->list);
281 memset(entry, 0, sizeof(*entry));
282 391
283#ifdef CONFIG_STACKTRACE 392#ifdef CONFIG_STACKTRACE
284 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; 393 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +395,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
286 entry->stacktrace.skip = 2; 395 entry->stacktrace.skip = 2;
287 save_stack_trace(&entry->stacktrace); 396 save_stack_trace(&entry->stacktrace);
288#endif 397#endif
289 num_free_entries -= 1;
290 if (num_free_entries < min_free_entries)
291 min_free_entries = num_free_entries;
292 398
293out: 399out:
294 spin_unlock_irqrestore(&free_entries_lock, flags); 400 spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +416,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
310 spin_unlock_irqrestore(&free_entries_lock, flags); 416 spin_unlock_irqrestore(&free_entries_lock, flags);
311} 417}
312 418
419int dma_debug_resize_entries(u32 num_entries)
420{
421 int i, delta, ret = 0;
422 unsigned long flags;
423 struct dma_debug_entry *entry;
424 LIST_HEAD(tmp);
425
426 spin_lock_irqsave(&free_entries_lock, flags);
427
428 if (nr_total_entries < num_entries) {
429 delta = num_entries - nr_total_entries;
430
431 spin_unlock_irqrestore(&free_entries_lock, flags);
432
433 for (i = 0; i < delta; i++) {
434 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
435 if (!entry)
436 break;
437
438 list_add_tail(&entry->list, &tmp);
439 }
440
441 spin_lock_irqsave(&free_entries_lock, flags);
442
443 list_splice(&tmp, &free_entries);
444 nr_total_entries += i;
445 num_free_entries += i;
446 } else {
447 delta = nr_total_entries - num_entries;
448
449 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
450 entry = __dma_entry_alloc();
451 kfree(entry);
452 }
453
454 nr_total_entries -= i;
455 }
456
457 if (nr_total_entries != num_entries)
458 ret = 1;
459
460 spin_unlock_irqrestore(&free_entries_lock, flags);
461
462 return ret;
463}
464EXPORT_SYMBOL(dma_debug_resize_entries);
465
313/* 466/*
314 * DMA-API debugging init code 467 * DMA-API debugging init code
315 * 468 *
@@ -334,8 +487,7 @@ static int prealloc_memory(u32 num_entries)
334 num_free_entries = num_entries; 487 num_free_entries = num_entries;
335 min_free_entries = num_entries; 488 min_free_entries = num_entries;
336 489
337 printk(KERN_INFO "DMA-API: preallocated %d debug entries\n", 490 pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
338 num_entries);
339 491
340 return 0; 492 return 0;
341 493
@@ -349,11 +501,102 @@ out_err:
349 return -ENOMEM; 501 return -ENOMEM;
350} 502}
351 503
504static ssize_t filter_read(struct file *file, char __user *user_buf,
505 size_t count, loff_t *ppos)
506{
507 char buf[NAME_MAX_LEN + 1];
508 unsigned long flags;
509 int len;
510
511 if (!current_driver_name[0])
512 return 0;
513
514 /*
515 * We can't copy to userspace directly because current_driver_name can
516 * only be read under the driver_name_lock with irqs disabled. So
517 * create a temporary copy first.
518 */
519 read_lock_irqsave(&driver_name_lock, flags);
520 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
521 read_unlock_irqrestore(&driver_name_lock, flags);
522
523 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
524}
525
526static ssize_t filter_write(struct file *file, const char __user *userbuf,
527 size_t count, loff_t *ppos)
528{
529 char buf[NAME_MAX_LEN];
530 unsigned long flags;
531 size_t len;
532 int i;
533
534 /*
535 * We can't copy from userspace directly. Access to
536 * current_driver_name is protected with a write_lock with irqs
537 * disabled. Since copy_from_user can fault and may sleep we
538 * need to copy to temporary buffer first
539 */
540 len = min(count, (size_t)(NAME_MAX_LEN - 1));
541 if (copy_from_user(buf, userbuf, len))
542 return -EFAULT;
543
544 buf[len] = 0;
545
546 write_lock_irqsave(&driver_name_lock, flags);
547
548 /*
549 * Now handle the string we got from userspace very carefully.
550 * The rules are:
551 * - only use the first token we got
552 * - token delimiter is everything looking like a space
553 * character (' ', '\n', '\t' ...)
554 *
555 */
556 if (!isalnum(buf[0])) {
557 /*
558 * If the first character userspace gave us is not
559 * alphanumerical then assume the filter should be
560 * switched off.
561 */
562 if (current_driver_name[0])
563 pr_info("DMA-API: switching off dma-debug driver filter\n");
564 current_driver_name[0] = 0;
565 current_driver = NULL;
566 goto out_unlock;
567 }
568
569 /*
570 * Now parse out the first token and use it as the name for the
571 * driver to filter for.
572 */
573 for (i = 0; i < NAME_MAX_LEN; ++i) {
574 current_driver_name[i] = buf[i];
575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576 break;
577 }
578 current_driver_name[i] = 0;
579 current_driver = NULL;
580
581 pr_info("DMA-API: enable driver filter for driver [%s]\n",
582 current_driver_name);
583
584out_unlock:
585 write_unlock_irqrestore(&driver_name_lock, flags);
586
587 return count;
588}
589
590const struct file_operations filter_fops = {
591 .read = filter_read,
592 .write = filter_write,
593};
594
352static int dma_debug_fs_init(void) 595static int dma_debug_fs_init(void)
353{ 596{
354 dma_debug_dent = debugfs_create_dir("dma-api", NULL); 597 dma_debug_dent = debugfs_create_dir("dma-api", NULL);
355 if (!dma_debug_dent) { 598 if (!dma_debug_dent) {
356 printk(KERN_ERR "DMA-API: can not create debugfs directory\n"); 599 pr_err("DMA-API: can not create debugfs directory\n");
357 return -ENOMEM; 600 return -ENOMEM;
358 } 601 }
359 602
@@ -392,6 +635,11 @@ static int dma_debug_fs_init(void)
392 if (!min_free_entries_dent) 635 if (!min_free_entries_dent)
393 goto out_err; 636 goto out_err;
394 637
638 filter_dent = debugfs_create_file("driver_filter", 0644,
639 dma_debug_dent, NULL, &filter_fops);
640 if (!filter_dent)
641 goto out_err;
642
395 return 0; 643 return 0;
396 644
397out_err: 645out_err:
@@ -400,9 +648,64 @@ out_err:
400 return -ENOMEM; 648 return -ENOMEM;
401} 649}
402 650
651static int device_dma_allocations(struct device *dev)
652{
653 struct dma_debug_entry *entry;
654 unsigned long flags;
655 int count = 0, i;
656
657 local_irq_save(flags);
658
659 for (i = 0; i < HASH_SIZE; ++i) {
660 spin_lock(&dma_entry_hash[i].lock);
661 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
662 if (entry->dev == dev)
663 count += 1;
664 }
665 spin_unlock(&dma_entry_hash[i].lock);
666 }
667
668 local_irq_restore(flags);
669
670 return count;
671}
672
673static int dma_debug_device_change(struct notifier_block *nb,
674 unsigned long action, void *data)
675{
676 struct device *dev = data;
677 int count;
678
679
680 switch (action) {
681 case BUS_NOTIFY_UNBOUND_DRIVER:
682 count = device_dma_allocations(dev);
683 if (count == 0)
684 break;
685 err_printk(dev, NULL, "DMA-API: device driver has pending "
686 "DMA allocations while released from device "
687 "[count=%d]\n", count);
688 break;
689 default:
690 break;
691 }
692
693 return 0;
694}
695
403void dma_debug_add_bus(struct bus_type *bus) 696void dma_debug_add_bus(struct bus_type *bus)
404{ 697{
405 /* FIXME: register notifier */ 698 struct notifier_block *nb;
699
700 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
701 if (nb == NULL) {
702 pr_err("dma_debug_add_bus: out of memory\n");
703 return;
704 }
705
706 nb->notifier_call = dma_debug_device_change;
707
708 bus_register_notifier(bus, nb);
406} 709}
407 710
408/* 711/*
@@ -417,12 +720,11 @@ void dma_debug_init(u32 num_entries)
417 720
418 for (i = 0; i < HASH_SIZE; ++i) { 721 for (i = 0; i < HASH_SIZE; ++i) {
419 INIT_LIST_HEAD(&dma_entry_hash[i].list); 722 INIT_LIST_HEAD(&dma_entry_hash[i].list);
420 dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; 723 spin_lock_init(&dma_entry_hash[i].lock);
421 } 724 }
422 725
423 if (dma_debug_fs_init() != 0) { 726 if (dma_debug_fs_init() != 0) {
424 printk(KERN_ERR "DMA-API: error creating debugfs entries " 727 pr_err("DMA-API: error creating debugfs entries - disabling\n");
425 "- disabling\n");
426 global_disable = true; 728 global_disable = true;
427 729
428 return; 730 return;
@@ -432,14 +734,15 @@ void dma_debug_init(u32 num_entries)
432 num_entries = req_entries; 734 num_entries = req_entries;
433 735
434 if (prealloc_memory(num_entries) != 0) { 736 if (prealloc_memory(num_entries) != 0) {
435 printk(KERN_ERR "DMA-API: debugging out of memory error " 737 pr_err("DMA-API: debugging out of memory error - disabled\n");
436 "- disabled\n");
437 global_disable = true; 738 global_disable = true;
438 739
439 return; 740 return;
440 } 741 }
441 742
442 printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); 743 nr_total_entries = num_free_entries;
744
745 pr_info("DMA-API: debugging enabled by kernel config\n");
443} 746}
444 747
445static __init int dma_debug_cmdline(char *str) 748static __init int dma_debug_cmdline(char *str)
@@ -448,8 +751,7 @@ static __init int dma_debug_cmdline(char *str)
448 return -EINVAL; 751 return -EINVAL;
449 752
450 if (strncmp(str, "off", 3) == 0) { 753 if (strncmp(str, "off", 3) == 0) {
451 printk(KERN_INFO "DMA-API: debugging disabled on kernel " 754 pr_info("DMA-API: debugging disabled on kernel command line\n");
452 "command line\n");
453 global_disable = true; 755 global_disable = true;
454 } 756 }
455 757
@@ -558,90 +860,85 @@ static void check_for_stack(struct device *dev, void *addr)
558 "stack [addr=%p]\n", addr); 860 "stack [addr=%p]\n", addr);
559} 861}
560 862
561static inline bool overlap(void *addr, u64 size, void *start, void *end) 863static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
562{ 864{
563 void *addr2 = (char *)addr + size; 865 unsigned long a1 = (unsigned long)addr;
866 unsigned long b1 = a1 + len;
867 unsigned long a2 = (unsigned long)start;
868 unsigned long b2 = (unsigned long)end;
564 869
565 return ((addr >= start && addr < end) || 870 return !(b1 <= a2 || a1 >= b2);
566 (addr2 >= start && addr2 < end) ||
567 ((addr < start) && (addr2 >= end)));
568} 871}
569 872
570static void check_for_illegal_area(struct device *dev, void *addr, u64 size) 873static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
571{ 874{
572 if (overlap(addr, size, _text, _etext) || 875 if (overlap(addr, len, _text, _etext) ||
573 overlap(addr, size, __start_rodata, __end_rodata)) 876 overlap(addr, len, __start_rodata, __end_rodata))
574 err_printk(dev, NULL, "DMA-API: device driver maps " 877 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
575 "memory from kernel text or rodata "
576 "[addr=%p] [size=%llu]\n", addr, size);
577} 878}
578 879
579static void check_sync(struct device *dev, dma_addr_t addr, 880static void check_sync(struct device *dev,
580 u64 size, u64 offset, int direction, bool to_cpu) 881 struct dma_debug_entry *ref,
882 bool to_cpu)
581{ 883{
582 struct dma_debug_entry ref = {
583 .dev = dev,
584 .dev_addr = addr,
585 .size = size,
586 .direction = direction,
587 };
588 struct dma_debug_entry *entry; 884 struct dma_debug_entry *entry;
589 struct hash_bucket *bucket; 885 struct hash_bucket *bucket;
590 unsigned long flags; 886 unsigned long flags;
591 887
592 bucket = get_hash_bucket(&ref, &flags); 888 bucket = get_hash_bucket(ref, &flags);
593 889
594 entry = hash_bucket_find(bucket, &ref); 890 entry = hash_bucket_find(bucket, ref);
595 891
596 if (!entry) { 892 if (!entry) {
597 err_printk(dev, NULL, "DMA-API: device driver tries " 893 err_printk(dev, NULL, "DMA-API: device driver tries "
598 "to sync DMA memory it has not allocated " 894 "to sync DMA memory it has not allocated "
599 "[device address=0x%016llx] [size=%llu bytes]\n", 895 "[device address=0x%016llx] [size=%llu bytes]\n",
600 (unsigned long long)addr, size); 896 (unsigned long long)ref->dev_addr, ref->size);
601 goto out; 897 goto out;
602 } 898 }
603 899
604 if ((offset + size) > entry->size) { 900 if (ref->size > entry->size) {
605 err_printk(dev, entry, "DMA-API: device driver syncs" 901 err_printk(dev, entry, "DMA-API: device driver syncs"
606 " DMA memory outside allocated range " 902 " DMA memory outside allocated range "
607 "[device address=0x%016llx] " 903 "[device address=0x%016llx] "
608 "[allocation size=%llu bytes] [sync offset=%llu] " 904 "[allocation size=%llu bytes] "
609 "[sync size=%llu]\n", entry->dev_addr, entry->size, 905 "[sync offset+size=%llu]\n",
610 offset, size); 906 entry->dev_addr, entry->size,
907 ref->size);
611 } 908 }
612 909
613 if (direction != entry->direction) { 910 if (ref->direction != entry->direction) {
614 err_printk(dev, entry, "DMA-API: device driver syncs " 911 err_printk(dev, entry, "DMA-API: device driver syncs "
615 "DMA memory with different direction " 912 "DMA memory with different direction "
616 "[device address=0x%016llx] [size=%llu bytes] " 913 "[device address=0x%016llx] [size=%llu bytes] "
617 "[mapped with %s] [synced with %s]\n", 914 "[mapped with %s] [synced with %s]\n",
618 (unsigned long long)addr, entry->size, 915 (unsigned long long)ref->dev_addr, entry->size,
619 dir2name[entry->direction], 916 dir2name[entry->direction],
620 dir2name[direction]); 917 dir2name[ref->direction]);
621 } 918 }
622 919
623 if (entry->direction == DMA_BIDIRECTIONAL) 920 if (entry->direction == DMA_BIDIRECTIONAL)
624 goto out; 921 goto out;
625 922
626 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
627 !(direction == DMA_TO_DEVICE)) 924 !(ref->direction == DMA_TO_DEVICE))
628 err_printk(dev, entry, "DMA-API: device driver syncs " 925 err_printk(dev, entry, "DMA-API: device driver syncs "
629 "device read-only DMA memory for cpu " 926 "device read-only DMA memory for cpu "
630 "[device address=0x%016llx] [size=%llu bytes] " 927 "[device address=0x%016llx] [size=%llu bytes] "
631 "[mapped with %s] [synced with %s]\n", 928 "[mapped with %s] [synced with %s]\n",
632 (unsigned long long)addr, entry->size, 929 (unsigned long long)ref->dev_addr, entry->size,
633 dir2name[entry->direction], 930 dir2name[entry->direction],
634 dir2name[direction]); 931 dir2name[ref->direction]);
635 932
636 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 933 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
637 !(direction == DMA_FROM_DEVICE)) 934 !(ref->direction == DMA_FROM_DEVICE))
638 err_printk(dev, entry, "DMA-API: device driver syncs " 935 err_printk(dev, entry, "DMA-API: device driver syncs "
639 "device write-only DMA memory to device " 936 "device write-only DMA memory to device "
640 "[device address=0x%016llx] [size=%llu bytes] " 937 "[device address=0x%016llx] [size=%llu bytes] "
641 "[mapped with %s] [synced with %s]\n", 938 "[mapped with %s] [synced with %s]\n",
642 (unsigned long long)addr, entry->size, 939 (unsigned long long)ref->dev_addr, entry->size,
643 dir2name[entry->direction], 940 dir2name[entry->direction],
644 dir2name[direction]); 941 dir2name[ref->direction]);
645 942
646out: 943out:
647 put_hash_bucket(bucket, &flags); 944 put_hash_bucket(bucket, &flags);
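
The rewritten overlap() above is the canonical half-open interval test: two ranges [a1, b1) and [a2, b2) intersect exactly when neither lies entirely before the other. A tiny standalone check of the two interesting cases:

	#include <stdbool.h>
	#include <stdio.h>

	static bool ranges_overlap(unsigned long a1, unsigned long b1,
				   unsigned long a2, unsigned long b2)
	{
		return !(b1 <= a2 || a1 >= b2);
	}

	int main(void)
	{
		/* buffer [100, 200) vs. protected region [150, 300): overlaps */
		printf("%d\n", ranges_overlap(100, 200, 150, 300));	/* 1 */
		/* buffer [100, 150) ends exactly where the region starts: no overlap */
		printf("%d\n", ranges_overlap(100, 150, 150, 300));	/* 0 */
		return 0;
	}
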
@@ -675,7 +972,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
675 entry->type = dma_debug_single; 972 entry->type = dma_debug_single;
676 973
677 if (!PageHighMem(page)) { 974 if (!PageHighMem(page)) {
678 void *addr = ((char *)page_address(page)) + offset; 975 void *addr = page_address(page) + offset;
976
679 check_for_stack(dev, addr); 977 check_for_stack(dev, addr);
680 check_for_illegal_area(dev, addr, size); 978 check_for_illegal_area(dev, addr, size);
681 } 979 }
@@ -723,15 +1021,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
723 entry->type = dma_debug_sg; 1021 entry->type = dma_debug_sg;
724 entry->dev = dev; 1022 entry->dev = dev;
725 entry->paddr = sg_phys(s); 1023 entry->paddr = sg_phys(s);
726 entry->size = s->length; 1024 entry->size = sg_dma_len(s);
727 entry->dev_addr = s->dma_address; 1025 entry->dev_addr = sg_dma_address(s);
728 entry->direction = direction; 1026 entry->direction = direction;
729 entry->sg_call_ents = nents; 1027 entry->sg_call_ents = nents;
730 entry->sg_mapped_ents = mapped_ents; 1028 entry->sg_mapped_ents = mapped_ents;
731 1029
732 if (!PageHighMem(sg_page(s))) { 1030 if (!PageHighMem(sg_page(s))) {
733 check_for_stack(dev, sg_virt(s)); 1031 check_for_stack(dev, sg_virt(s));
734 check_for_illegal_area(dev, sg_virt(s), s->length); 1032 check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
735 } 1033 }
736 1034
737 add_dma_entry(entry); 1035 add_dma_entry(entry);
@@ -739,13 +1037,30 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
739} 1037}
740EXPORT_SYMBOL(debug_dma_map_sg); 1038EXPORT_SYMBOL(debug_dma_map_sg);
741 1039
1040static int get_nr_mapped_entries(struct device *dev,
1041 struct dma_debug_entry *ref)
1042{
1043 struct dma_debug_entry *entry;
1044 struct hash_bucket *bucket;
1045 unsigned long flags;
1046 int mapped_ents;
1047
1048 bucket = get_hash_bucket(ref, &flags);
1049 entry = hash_bucket_find(bucket, ref);
1050 mapped_ents = 0;
1051
1052 if (entry)
1053 mapped_ents = entry->sg_mapped_ents;
1054 put_hash_bucket(bucket, &flags);
1055
1056 return mapped_ents;
1057}
1058
742void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 1059void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
743 int nelems, int dir) 1060 int nelems, int dir)
744{ 1061{
745 struct dma_debug_entry *entry;
746 struct scatterlist *s; 1062 struct scatterlist *s;
747 int mapped_ents = 0, i; 1063 int mapped_ents = 0, i;
748 unsigned long flags;
749 1064
750 if (unlikely(global_disable)) 1065 if (unlikely(global_disable))
751 return; 1066 return;
@@ -756,24 +1071,17 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
756 .type = dma_debug_sg, 1071 .type = dma_debug_sg,
757 .dev = dev, 1072 .dev = dev,
758 .paddr = sg_phys(s), 1073 .paddr = sg_phys(s),
759 .dev_addr = s->dma_address, 1074 .dev_addr = sg_dma_address(s),
760 .size = s->length, 1075 .size = sg_dma_len(s),
761 .direction = dir, 1076 .direction = dir,
762 .sg_call_ents = 0, 1077 .sg_call_ents = nelems,
763 }; 1078 };
764 1079
765 if (mapped_ents && i >= mapped_ents) 1080 if (mapped_ents && i >= mapped_ents)
766 break; 1081 break;
767 1082
768 if (mapped_ents == 0) { 1083 if (!i)
769 struct hash_bucket *bucket; 1084 mapped_ents = get_nr_mapped_entries(dev, &ref);
770 ref.sg_call_ents = nelems;
771 bucket = get_hash_bucket(&ref, &flags);
772 entry = hash_bucket_find(bucket, &ref);
773 if (entry)
774 mapped_ents = entry->sg_mapped_ents;
775 put_hash_bucket(bucket, &flags);
776 }
777 1085
778 check_unmap(&ref); 1086 check_unmap(&ref);
779 } 1087 }
@@ -828,10 +1136,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
828void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 1136void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
829 size_t size, int direction) 1137 size_t size, int direction)
830{ 1138{
1139 struct dma_debug_entry ref;
1140
831 if (unlikely(global_disable)) 1141 if (unlikely(global_disable))
832 return; 1142 return;
833 1143
834 check_sync(dev, dma_handle, size, 0, direction, true); 1144 ref.type = dma_debug_single;
1145 ref.dev = dev;
1146 ref.dev_addr = dma_handle;
1147 ref.size = size;
1148 ref.direction = direction;
1149 ref.sg_call_ents = 0;
1150
1151 check_sync(dev, &ref, true);
835} 1152}
836EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); 1153EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
837 1154
@@ -839,10 +1156,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
839 dma_addr_t dma_handle, size_t size, 1156 dma_addr_t dma_handle, size_t size,
840 int direction) 1157 int direction)
841{ 1158{
1159 struct dma_debug_entry ref;
1160
842 if (unlikely(global_disable)) 1161 if (unlikely(global_disable))
843 return; 1162 return;
844 1163
845 check_sync(dev, dma_handle, size, 0, direction, false); 1164 ref.type = dma_debug_single;
1165 ref.dev = dev;
1166 ref.dev_addr = dma_handle;
1167 ref.size = size;
1168 ref.direction = direction;
1169 ref.sg_call_ents = 0;
1170
1171 check_sync(dev, &ref, false);
846} 1172}
847EXPORT_SYMBOL(debug_dma_sync_single_for_device); 1173EXPORT_SYMBOL(debug_dma_sync_single_for_device);
848 1174
@@ -851,10 +1177,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
851 unsigned long offset, size_t size, 1177 unsigned long offset, size_t size,
852 int direction) 1178 int direction)
853{ 1179{
1180 struct dma_debug_entry ref;
1181
854 if (unlikely(global_disable)) 1182 if (unlikely(global_disable))
855 return; 1183 return;
856 1184
857 check_sync(dev, dma_handle, size, offset, direction, true); 1185 ref.type = dma_debug_single;
1186 ref.dev = dev;
1187 ref.dev_addr = dma_handle;
1188 ref.size = offset + size;
1189 ref.direction = direction;
1190 ref.sg_call_ents = 0;
1191
1192 check_sync(dev, &ref, true);
858} 1193}
859EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); 1194EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
860 1195
@@ -863,10 +1198,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
863 unsigned long offset, 1198 unsigned long offset,
864 size_t size, int direction) 1199 size_t size, int direction)
865{ 1200{
1201 struct dma_debug_entry ref;
1202
866 if (unlikely(global_disable)) 1203 if (unlikely(global_disable))
867 return; 1204 return;
868 1205
869 check_sync(dev, dma_handle, size, offset, direction, false); 1206 ref.type = dma_debug_single;
1207 ref.dev = dev;
1208 ref.dev_addr = dma_handle;
1209 ref.size = offset + size;
1210 ref.direction = direction;
1211 ref.sg_call_ents = 0;
1212
1213 check_sync(dev, &ref, false);
870} 1214}
871EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); 1215EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
872 1216
@@ -874,14 +1218,30 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
874 int nelems, int direction) 1218 int nelems, int direction)
875{ 1219{
876 struct scatterlist *s; 1220 struct scatterlist *s;
877 int i; 1221 int mapped_ents = 0, i;
878 1222
879 if (unlikely(global_disable)) 1223 if (unlikely(global_disable))
880 return; 1224 return;
881 1225
882 for_each_sg(sg, s, nelems, i) { 1226 for_each_sg(sg, s, nelems, i) {
883 check_sync(dev, s->dma_address, s->dma_length, 0, 1227
884 direction, true); 1228 struct dma_debug_entry ref = {
1229 .type = dma_debug_sg,
1230 .dev = dev,
1231 .paddr = sg_phys(s),
1232 .dev_addr = sg_dma_address(s),
1233 .size = sg_dma_len(s),
1234 .direction = direction,
1235 .sg_call_ents = nelems,
1236 };
1237
1238 if (!i)
1239 mapped_ents = get_nr_mapped_entries(dev, &ref);
1240
1241 if (i >= mapped_ents)
1242 break;
1243
1244 check_sync(dev, &ref, true);
885 } 1245 }
886} 1246}
887EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); 1247EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -890,15 +1250,48 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
890 int nelems, int direction) 1250 int nelems, int direction)
891{ 1251{
892 struct scatterlist *s; 1252 struct scatterlist *s;
893 int i; 1253 int mapped_ents = 0, i;
894 1254
895 if (unlikely(global_disable)) 1255 if (unlikely(global_disable))
896 return; 1256 return;
897 1257
898 for_each_sg(sg, s, nelems, i) { 1258 for_each_sg(sg, s, nelems, i) {
899 check_sync(dev, s->dma_address, s->dma_length, 0, 1259
900 direction, false); 1260 struct dma_debug_entry ref = {
1261 .type = dma_debug_sg,
1262 .dev = dev,
1263 .paddr = sg_phys(s),
1264 .dev_addr = sg_dma_address(s),
1265 .size = sg_dma_len(s),
1266 .direction = direction,
1267 .sg_call_ents = nelems,
1268 };
1269 if (!i)
1270 mapped_ents = get_nr_mapped_entries(dev, &ref);
1271
1272 if (i >= mapped_ents)
1273 break;
1274
1275 check_sync(dev, &ref, false);
901 } 1276 }
902} 1277}
903EXPORT_SYMBOL(debug_dma_sync_sg_for_device); 1278EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
904 1279
1280static int __init dma_debug_driver_setup(char *str)
1281{
1282 int i;
1283
1284 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1285 current_driver_name[i] = *str;
1286 if (*str == 0)
1287 break;
1288 }
1289
1290 if (current_driver_name[0])
1291 pr_info("DMA-API: enable driver filter for driver [%s]\n",
1292 current_driver_name);
1293
1294
1295 return 1;
1296}
1297__setup("dma_debug_driver=", dma_debug_driver_setup);
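The __setup() handler above adds a boot-time filter so that DMA-API error reports can be restricted to a single driver. A minimal usage sketch on the kernel command line (the driver name is purely illustrative):

    dma_debug_driver=e1000e

The name is copied into current_driver_name (truncated to NAME_MAX_LEN - 1 characters) and the pr_info line confirms that the filter is active.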
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 833139ce1e22..e22c148e4b7f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query,
164 164
165 if (!newflags) 165 if (!newflags)
166 dt->num_enabled--; 166 dt->num_enabled--;
167 else if (!dp-flags) 167 else if (!dp->flags)
168 dt->num_enabled++; 168 dt->num_enabled++;
169 dp->flags = newflags; 169 dp->flags = newflags;
170 if (newflags) { 170 if (newflags) {
diff --git a/lib/extable.c b/lib/extable.c
index 179c08745595..4cac81ec225e 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start,
39 sort(start, finish - start, sizeof(struct exception_table_entry), 39 sort(start, finish - start, sizeof(struct exception_table_entry),
40 cmp_ex, NULL); 40 cmp_ex, NULL);
41} 41}
42#endif 42
43#ifdef CONFIG_MODULES
44/*
45 * If the exception table is sorted, any entries referring to the module's
46 * init section will be at the beginning or the end.
47 */
48void trim_init_extable(struct module *m)
49{
50 /*trim the beginning*/
51 while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
52 m->extable++;
53 m->num_exentries--;
54 }
55 /*trim the end*/
56 while (m->num_exentries &&
57 within_module_init(m->extable[m->num_exentries-1].insn, m))
58 m->num_exentries--;
59}
60#endif /* CONFIG_MODULES */
61#endif /* !ARCH_HAS_SORT_EXTABLE */
43 62
44#ifndef ARCH_HAS_SEARCH_EXTABLE 63#ifndef ARCH_HAS_SEARCH_EXTABLE
45/* 64/*
diff --git a/lib/flex_array.c b/lib/flex_array.c
new file mode 100644
index 000000000000..7baed2fc3bc8
--- /dev/null
+++ b/lib/flex_array.c
@@ -0,0 +1,268 @@
1/*
2 * Flexible array managed in PAGE_SIZE parts
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2009
19 *
20 * Author: Dave Hansen <dave@linux.vnet.ibm.com>
21 */
22
23#include <linux/flex_array.h>
24#include <linux/slab.h>
25#include <linux/stddef.h>
26
27struct flex_array_part {
28 char elements[FLEX_ARRAY_PART_SIZE];
29};
30
31static inline int __elements_per_part(int element_size)
32{
33 return FLEX_ARRAY_PART_SIZE / element_size;
34}
35
36static inline int bytes_left_in_base(void)
37{
38 int element_offset = offsetof(struct flex_array, parts);
39 int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
40 return bytes_left;
41}
42
43static inline int nr_base_part_ptrs(void)
44{
45 return bytes_left_in_base() / sizeof(struct flex_array_part *);
46}
47
48/*
49 * If a user requests an allocation which is small
50 * enough, we may simply use the space in the
51 * flex_array->parts[] array to store the user
52 * data.
53 */
54static inline int elements_fit_in_base(struct flex_array *fa)
55{
56 int data_size = fa->element_size * fa->total_nr_elements;
57 if (data_size <= bytes_left_in_base())
58 return 1;
59 return 0;
60}
61
62/**
63 * flex_array_alloc - allocate a new flexible array
64 * @element_size: the size of individual elements in the array
65 * @total: total number of elements that this should hold
66 *
67 * Note: all locking must be provided by the caller.
68 *
69 * @total is used to size internal structures. If the user ever
70 * accesses any array indexes >=@total, it will produce errors.
71 *
72 * The maximum number of elements is defined as: the number of
73 * elements that can be stored in a page times the number of
74 * page pointers that we can fit in the base structure or (using
75 * integer math):
76 *
77 * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
78 *
79 * Here's a table showing example capacities. Note that the maximum
80 * index that the get/put() functions allow is just nr_objects-1. This
81 * basically means that you get 4MB of storage on 32-bit and 2MB on
82 * 64-bit.
83 *
84 *
85 * Element size | Objects | Objects |
86 * PAGE_SIZE=4k | 32-bit | 64-bit |
87 * ---------------------------------|
88 * 1 bytes | 4186112 | 2093056 |
89 * 2 bytes | 2093056 | 1046528 |
90 * 3 bytes | 1395030 | 697515 |
91 * 4 bytes | 1046528 | 523264 |
92 * 32 bytes | 130816 | 65408 |
93 * 33 bytes | 126728 | 63364 |
94 * 2048 bytes | 2044 | 1022 |
95 * 2049 bytes | 1022 | 511 |
96 * void * | 1046528 | 261632 |
97 *
98 * Since 64-bit pointers are twice the size, we lose half the
99 * capacity in the base structure. Also note that no effort is made
100 * to efficiently pack objects across page boundaries.
101 */
102struct flex_array *flex_array_alloc(int element_size, unsigned int total,
103 gfp_t flags)
104{
105 struct flex_array *ret;
106 int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
107
108 /* max_size will end up 0 if element_size > PAGE_SIZE */
109 if (total > max_size)
110 return NULL;
111 ret = kzalloc(sizeof(struct flex_array), flags);
112 if (!ret)
113 return NULL;
114 ret->element_size = element_size;
115 ret->total_nr_elements = total;
116 return ret;
117}
118
119static int fa_element_to_part_nr(struct flex_array *fa,
120 unsigned int element_nr)
121{
122 return element_nr / __elements_per_part(fa->element_size);
123}
124
125/**
126 * flex_array_free_parts - just free the second-level pages
127 *
128 * This is to be used in cases where the base 'struct flex_array'
129 * has been statically allocated and should not be freed.
130 */
131void flex_array_free_parts(struct flex_array *fa)
132{
133 int part_nr;
134 int max_part = nr_base_part_ptrs();
135
136 if (elements_fit_in_base(fa))
137 return;
138 for (part_nr = 0; part_nr < max_part; part_nr++)
139 kfree(fa->parts[part_nr]);
140}
141
142void flex_array_free(struct flex_array *fa)
143{
144 flex_array_free_parts(fa);
145 kfree(fa);
146}
147
148static unsigned int index_inside_part(struct flex_array *fa,
149 unsigned int element_nr)
150{
151 unsigned int part_offset;
152
153 part_offset = element_nr % __elements_per_part(fa->element_size);
154 return part_offset * fa->element_size;
155}
156
157static struct flex_array_part *
158__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
159{
160 struct flex_array_part *part = fa->parts[part_nr];
161 if (!part) {
162 /*
163 * This leaves the part pages uninitialized
164 * and with potentially random data, just
165 * as if the user had kmalloc()'d the whole array.
166 * __GFP_ZERO can be used to zero it.
167 */
168 part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
169 if (!part)
170 return NULL;
171 fa->parts[part_nr] = part;
172 }
173 return part;
174}
175
176/**
177 * flex_array_put - copy data into the array at @element_nr
178 * @src: address of data to copy into the array
179 * @element_nr: index of the position in which to insert
180 * the new element.
181 *
182 * Note that this *copies* the contents of @src into
183 * the array. If you are trying to store an array of
184 * pointers, make sure to pass in &ptr instead of ptr.
185 *
186 * Locking must be provided by the caller.
187 */
188int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
189 gfp_t flags)
190{
191 int part_nr = fa_element_to_part_nr(fa, element_nr);
192 struct flex_array_part *part;
193 void *dst;
194
195 if (element_nr >= fa->total_nr_elements)
196 return -ENOSPC;
197 if (elements_fit_in_base(fa))
198 part = (struct flex_array_part *)&fa->parts[0];
199 else {
200 part = __fa_get_part(fa, part_nr, flags);
201 if (!part)
202 return -ENOMEM;
203 }
204 dst = &part->elements[index_inside_part(fa, element_nr)];
205 memcpy(dst, src, fa->element_size);
206 return 0;
207}
208
209/**
210 * flex_array_prealloc - guarantee that array space exists
211 * @start: index of first array element for which space is allocated
212 * @end: index of last (inclusive) element for which space is allocated
213 *
214 * This will guarantee that no future calls to flex_array_put()
215 * will allocate memory. It can be used if you are expecting to
216 * be holding a lock or in some atomic context while writing
217 * data into the array.
218 *
219 * Locking must be provided by the caller.
220 */
221int flex_array_prealloc(struct flex_array *fa, unsigned int start,
222 unsigned int end, gfp_t flags)
223{
224 int start_part;
225 int end_part;
226 int part_nr;
227 struct flex_array_part *part;
228
229 if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
230 return -ENOSPC;
231 if (elements_fit_in_base(fa))
232 return 0;
233 start_part = fa_element_to_part_nr(fa, start);
234 end_part = fa_element_to_part_nr(fa, end);
235 for (part_nr = start_part; part_nr <= end_part; part_nr++) {
236 part = __fa_get_part(fa, part_nr, flags);
237 if (!part)
238 return -ENOMEM;
239 }
240 return 0;
241}
242
243/**
244 * flex_array_get - pull data back out of the array
245 * @element_nr: index of the element to fetch from the array
246 *
247 * Returns a pointer to the data at index @element_nr. Note
248 * that this is a copy of the data that was passed in. If you
249 * are using this to store pointers, you'll get back &ptr.
250 *
251 * Locking must be provided by the caller.
252 */
253void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
254{
255 int part_nr = fa_element_to_part_nr(fa, element_nr);
256 struct flex_array_part *part;
257
258 if (element_nr >= fa->total_nr_elements)
259 return NULL;
260 if (elements_fit_in_base(fa))
261 part = (struct flex_array_part *)&fa->parts[0];
262 else {
263 part = fa->parts[part_nr];
264 if (!part)
265 return NULL;
266 }
267 return &part->elements[index_inside_part(fa, element_nr)];
268}
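Taken together with the kernel-doc above, a minimal caller of the new API might look like the following sketch (a hypothetical function, not part of this patch):

    #include <linux/flex_array.h>

    static int flex_array_example(gfp_t gfp)
    {
            struct flex_array *fa;
            int value = 42, *stored;
            int err;

            /* room for 128 ints; returns NULL if 'total' exceeds the capacity table above */
            fa = flex_array_alloc(sizeof(int), 128, gfp);
            if (!fa)
                    return -ENOMEM;

            /* copies 'value' into slot 0, allocating the backing part page if needed */
            err = flex_array_put(fa, 0, &value, gfp);
            if (err)
                    goto out;

            /* pointer to the stored copy (NULL if out of range or the part was never allocated) */
            stored = flex_array_get(fa, 0);
            err = (stored && *stored == 42) ? 0 : -EINVAL;
    out:
            flex_array_free(fa);
            return err;
    }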
diff --git a/lib/gcd.c b/lib/gcd.c
new file mode 100644
index 000000000000..f879033d9822
--- /dev/null
+++ b/lib/gcd.c
@@ -0,0 +1,18 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Greatest common divisor */
6unsigned long gcd(unsigned long a, unsigned long b)
7{
8 unsigned long r;
9
10 if (a < b)
11 swap(a, b);
12 while ((r = a % b) != 0) {
13 a = b;
14 b = r;
15 }
16 return b;
17}
18EXPORT_SYMBOL_GPL(gcd);
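As a quick worked example of the Euclidean loop above: gcd(48, 18) computes 48 % 18 = 12, then 18 % 12 = 6, then 12 % 6 = 0, at which point the loop stops and the function returns 6. Had the arguments been given as gcd(18, 48), the initial swap() would reorder them first.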
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d58..eed2bdb865e7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
85 int bit, end_bit; 85 int bit, end_bit;
86 86
87 87
88 write_lock(&pool->lock);
89 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { 88 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
90 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 89 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
91 list_del(&chunk->next_chunk); 90 list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d26..39af2560f765 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
65 65
66 for (j = 0; j < ngroups; j++) 66 for (j = 0; j < ngroups; j++)
67 lx += scnprintf(linebuf + lx, linebuflen - lx, 67 lx += scnprintf(linebuf + lx, linebuflen - lx,
68 "%16.16llx ", (unsigned long long)*(ptr8 + j)); 68 "%s%16.16llx", j ? " " : "",
69 (unsigned long long)*(ptr8 + j));
69 ascii_column = 17 * ngroups + 2; 70 ascii_column = 17 * ngroups + 2;
70 break; 71 break;
71 } 72 }
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
76 77
77 for (j = 0; j < ngroups; j++) 78 for (j = 0; j < ngroups; j++)
78 lx += scnprintf(linebuf + lx, linebuflen - lx, 79 lx += scnprintf(linebuf + lx, linebuflen - lx,
79 "%8.8x ", *(ptr4 + j)); 80 "%s%8.8x", j ? " " : "", *(ptr4 + j));
80 ascii_column = 9 * ngroups + 2; 81 ascii_column = 9 * ngroups + 2;
81 break; 82 break;
82 } 83 }
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
87 88
88 for (j = 0; j < ngroups; j++) 89 for (j = 0; j < ngroups; j++)
89 lx += scnprintf(linebuf + lx, linebuflen - lx, 90 lx += scnprintf(linebuf + lx, linebuflen - lx,
90 "%4.4x ", *(ptr2 + j)); 91 "%s%4.4x", j ? " " : "", *(ptr2 + j));
91 ascii_column = 5 * ngroups + 2; 92 ascii_column = 5 * ngroups + 2;
92 break; 93 break;
93 } 94 }
94 95
95 default: 96 default:
96 for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; 97 for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
97 j++) {
98 ch = ptr[j]; 98 ch = ptr[j];
99 linebuf[lx++] = hex_asc_hi(ch); 99 linebuf[lx++] = hex_asc_hi(ch);
100 linebuf[lx++] = hex_asc_lo(ch); 100 linebuf[lx++] = hex_asc_lo(ch);
101 linebuf[lx++] = ' '; 101 linebuf[lx++] = ' ';
102 } 102 }
103 if (j)
104 lx--;
105
103 ascii_column = 3 * rowsize + 2; 106 ascii_column = 3 * rowsize + 2;
104 break; 107 break;
105 } 108 }
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
108 111
109 while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) 112 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
110 linebuf[lx++] = ' '; 113 linebuf[lx++] = ' ';
111 for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) 114 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
112 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] 115 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
113 : '.'; 116 : '.';
114nil: 117nil:
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe4f7a0..b512b746d2af 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
793 struct kobject *parent_kobj) 793 struct kobject *parent_kobj)
794{ 794{
795 struct kset *kset; 795 struct kset *kset;
796 int retval;
796 797
797 kset = kzalloc(sizeof(*kset), GFP_KERNEL); 798 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
798 if (!kset) 799 if (!kset)
799 return NULL; 800 return NULL;
800 kobject_set_name(&kset->kobj, name); 801 retval = kobject_set_name(&kset->kobj, name);
802 if (retval) {
803 kfree(kset);
804 return NULL;
805 }
801 kset->uevent_ops = uevent_ops; 806 kset->uevent_ops = uevent_ops;
802 kset->kobj.parent = parent_kobj; 807 kset->kobj.parent = parent_kobj;
803 808
diff --git a/lib/lmb.c b/lib/lmb.c
index e4a6482d8b26..0343c05609f0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
429 return lmb.memory.size; 429 return lmb.memory.size;
430} 430}
431 431
432u64 __init lmb_end_of_DRAM(void) 432u64 lmb_end_of_DRAM(void)
433{ 433{
434 int idx = lmb.memory.cnt - 1; 434 int idx = lmb.memory.cnt - 1;
435 435
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344ec..23abbd93cae1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
351} 351}
352EXPORT_SYMBOL(radix_tree_insert); 352EXPORT_SYMBOL(radix_tree_insert);
353 353
354/** 354/*
355 * radix_tree_lookup_slot - lookup a slot in a radix tree 355 * is_slot == 1 : search for the slot.
356 * @root: radix tree root 356 * is_slot == 0 : search for the node.
357 * @index: index key
358 *
359 * Returns: the slot corresponding to the position @index in the
360 * radix tree @root. This is useful for update-if-exists operations.
361 *
362 * This function can be called under rcu_read_lock iff the slot is not
363 * modified by radix_tree_replace_slot, otherwise it must be called
364 * exclusive from other writers. Any dereference of the slot must be done
365 * using radix_tree_deref_slot.
366 */ 357 */
367void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 358static void *radix_tree_lookup_element(struct radix_tree_root *root,
359 unsigned long index, int is_slot)
368{ 360{
369 unsigned int height, shift; 361 unsigned int height, shift;
370 struct radix_tree_node *node, **slot; 362 struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
376 if (!radix_tree_is_indirect_ptr(node)) { 368 if (!radix_tree_is_indirect_ptr(node)) {
377 if (index > 0) 369 if (index > 0)
378 return NULL; 370 return NULL;
379 return (void **)&root->rnode; 371 return is_slot ? (void *)&root->rnode : node;
380 } 372 }
381 node = radix_tree_indirect_to_ptr(node); 373 node = radix_tree_indirect_to_ptr(node);
382 374
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
397 height--; 389 height--;
398 } while (height > 0); 390 } while (height > 0);
399 391
400 return (void **)slot; 392 return is_slot ? (void *)slot:node;
393}
394
395/**
396 * radix_tree_lookup_slot - lookup a slot in a radix tree
397 * @root: radix tree root
398 * @index: index key
399 *
400 * Returns: the slot corresponding to the position @index in the
401 * radix tree @root. This is useful for update-if-exists operations.
402 *
403 * This function can be called under rcu_read_lock iff the slot is not
404 * modified by radix_tree_replace_slot, otherwise it must be called
405 * exclusive from other writers. Any dereference of the slot must be done
406 * using radix_tree_deref_slot.
407 */
408void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
409{
410 return (void **)radix_tree_lookup_element(root, index, 1);
401} 411}
402EXPORT_SYMBOL(radix_tree_lookup_slot); 412EXPORT_SYMBOL(radix_tree_lookup_slot);
403 413
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
415 */ 425 */
416void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 426void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
417{ 427{
418 unsigned int height, shift; 428 return radix_tree_lookup_element(root, index, 0);
419 struct radix_tree_node *node, **slot;
420
421 node = rcu_dereference(root->rnode);
422 if (node == NULL)
423 return NULL;
424
425 if (!radix_tree_is_indirect_ptr(node)) {
426 if (index > 0)
427 return NULL;
428 return node;
429 }
430 node = radix_tree_indirect_to_ptr(node);
431
432 height = node->height;
433 if (index > radix_tree_maxindex(height))
434 return NULL;
435
436 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
437
438 do {
439 slot = (struct radix_tree_node **)
440 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
441 node = rcu_dereference(*slot);
442 if (node == NULL)
443 return NULL;
444
445 shift -= RADIX_TREE_MAP_SHIFT;
446 height--;
447 } while (height > 0);
448
449 return node;
450} 429}
451EXPORT_SYMBOL(radix_tree_lookup); 430EXPORT_SYMBOL(radix_tree_lookup);
452 431
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
666} 645}
667EXPORT_SYMBOL(radix_tree_next_hole); 646EXPORT_SYMBOL(radix_tree_next_hole);
668 647
648/**
649 * radix_tree_prev_hole - find the prev hole (not-present entry)
650 * @root: tree root
651 * @index: index key
652 * @max_scan: maximum range to search
653 *
654 * Search backwards in the range [max(index-max_scan+1, 0), index]
655 * for the first hole.
656 *
657 * Returns: the index of the hole if found, otherwise returns an index
658 * outside of the set specified (in which case 'index - return >= max_scan'
659 * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
660 *
661 * radix_tree_prev_hole may be called under rcu_read_lock. However, like
662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
663 * the tree at a single point in time. For example, if a hole is created
664 * at index 10, then subsequently a hole is created at index 5,
665 * radix_tree_prev_hole covering both indexes may return 5 if called under
666 * rcu_read_lock.
667 */
668unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
669 unsigned long index, unsigned long max_scan)
670{
671 unsigned long i;
672
673 for (i = 0; i < max_scan; i++) {
674 if (!radix_tree_lookup(root, index))
675 break;
676 index--;
677 if (index == LONG_MAX)
678 break;
679 }
680
681 return index;
682}
683EXPORT_SYMBOL(radix_tree_prev_hole);
684
669static unsigned int 685static unsigned int
670__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, 686__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
671 unsigned int max_items, unsigned long *next_index) 687 unsigned int max_items, unsigned long *next_index)
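radix_tree_prev_hole() mirrors radix_tree_next_hole() for backward scans; a small hedged sketch of how a caller can test the documented return convention (the helper name is hypothetical):

    #include <linux/radix-tree.h>
    #include <linux/types.h>

    /* Scan back at most 'max_scan' slots from 'index' looking for a hole. */
    static bool has_nearby_hole(struct radix_tree_root *root, unsigned long index,
                                unsigned long max_scan)
    {
            unsigned long hole = radix_tree_prev_hole(root, index, max_scan);

            /* per the kernel-doc above, a hit satisfies index - hole < max_scan */
            return index - hole < max_scan;
    }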
diff --git a/lib/rational.c b/lib/rational.c
new file mode 100644
index 000000000000..b3c099b5478e
--- /dev/null
+++ b/lib/rational.c
@@ -0,0 +1,62 @@
1/*
2 * rational fractions
3 *
4 * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com>
5 *
6 * helper functions when coping with rational numbers
7 */
8
9#include <linux/rational.h>
10
11/*
12 * calculate best rational approximation for a given fraction
13 * taking into account restricted register size, e.g. to find
14 * appropriate values for a pll with 5 bit denominator and
15 * 8 bit numerator register fields, trying to set up with a
16 * frequency ratio of 3.1415, one would say:
17 *
18 * rational_best_approximation(31415, 10000,
19 * (1 << 8) - 1, (1 << 5) - 1, &n, &d);
20 *
21 * you may look at given_numerator as a fixed point number,
22 * with the fractional part size described in given_denominator.
23 *
24 * for theoretical background, see:
25 * http://en.wikipedia.org/wiki/Continued_fraction
26 */
27
28void rational_best_approximation(
29 unsigned long given_numerator, unsigned long given_denominator,
30 unsigned long max_numerator, unsigned long max_denominator,
31 unsigned long *best_numerator, unsigned long *best_denominator)
32{
33 unsigned long n, d, n0, d0, n1, d1;
34 n = given_numerator;
35 d = given_denominator;
36 n0 = d1 = 0;
37 n1 = d0 = 1;
38 for (;;) {
39 unsigned long t, a;
40 if ((n1 > max_numerator) || (d1 > max_denominator)) {
41 n1 = n0;
42 d1 = d0;
43 break;
44 }
45 if (d == 0)
46 break;
47 t = d;
48 a = n / d;
49 d = n % d;
50 n = t;
51 t = n0 + a * n1;
52 n0 = n1;
53 n1 = t;
54 t = d0 + a * d1;
55 d0 = d1;
56 d1 = t;
57 }
58 *best_numerator = n1;
59 *best_denominator = d1;
60}
61
62EXPORT_SYMBOL(rational_best_approximation);
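Plugging the PLL example from the comment above into the function (8-bit numerator, 5-bit denominator, target ratio 3.1415) gives the following sketch; with those limits the loop should stop at 22/7, the last convergent that fits both fields:

    #include <linux/rational.h>

    static void pll_ratio_example(void)
    {
            unsigned long n, d;

            rational_best_approximation(31415, 10000,
                                        (1 << 8) - 1, (1 << 5) - 1, &n, &d);

            /* expected result: n == 22, d == 7 (22/7 ~= 3.1429) */
    }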
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659e0bc1..e2aa3be29858 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
231 node = node->rb_right; 231 node = node->rb_right;
232 while ((left = node->rb_left) != NULL) 232 while ((left = node->rb_left) != NULL)
233 node = left; 233 node = left;
234
235 if (rb_parent(old)) {
236 if (rb_parent(old)->rb_left == old)
237 rb_parent(old)->rb_left = node;
238 else
239 rb_parent(old)->rb_right = node;
240 } else
241 root->rb_node = node;
242
234 child = node->rb_right; 243 child = node->rb_right;
235 parent = rb_parent(node); 244 parent = rb_parent(node);
236 color = rb_color(node); 245 color = rb_color(node);
237 246
238 if (child)
239 rb_set_parent(child, parent);
240 if (parent == old) { 247 if (parent == old) {
241 parent->rb_right = child;
242 parent = node; 248 parent = node;
243 } else 249 } else {
250 if (child)
251 rb_set_parent(child, parent);
244 parent->rb_left = child; 252 parent->rb_left = child;
245 253
254 node->rb_right = old->rb_right;
255 rb_set_parent(old->rb_right, node);
256 }
257
246 node->rb_parent_color = old->rb_parent_color; 258 node->rb_parent_color = old->rb_parent_color;
247 node->rb_right = old->rb_right;
248 node->rb_left = old->rb_left; 259 node->rb_left = old->rb_left;
249
250 if (rb_parent(old))
251 {
252 if (rb_parent(old)->rb_left == old)
253 rb_parent(old)->rb_left = node;
254 else
255 rb_parent(old)->rb_right = node;
256 } else
257 root->rb_node = node;
258
259 rb_set_parent(old->rb_left, node); 260 rb_set_parent(old->rb_left, node);
260 if (old->rb_right) 261
261 rb_set_parent(old->rb_right, node);
262 goto color; 262 goto color;
263 } 263 }
264 264
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a295e404e908..0d475d8167bf 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
314 miter->__sg = sgl; 314 miter->__sg = sgl;
315 miter->__nents = nents; 315 miter->__nents = nents;
316 miter->__offset = 0; 316 miter->__offset = 0;
317 WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
317 miter->__flags = flags; 318 miter->__flags = flags;
318} 319}
319EXPORT_SYMBOL(sg_miter_start); 320EXPORT_SYMBOL(sg_miter_start);
@@ -394,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
394 if (miter->addr) { 395 if (miter->addr) {
395 miter->__offset += miter->consumed; 396 miter->__offset += miter->consumed;
396 397
398 if (miter->__flags & SG_MITER_TO_SG)
399 flush_kernel_dcache_page(miter->page);
400
397 if (miter->__flags & SG_MITER_ATOMIC) { 401 if (miter->__flags & SG_MITER_ATOMIC) {
398 WARN_ON(!irqs_disabled()); 402 WARN_ON(!irqs_disabled());
399 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); 403 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -426,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
426 unsigned int offset = 0; 430 unsigned int offset = 0;
427 struct sg_mapping_iter miter; 431 struct sg_mapping_iter miter;
428 unsigned long flags; 432 unsigned long flags;
433 unsigned int sg_flags = SG_MITER_ATOMIC;
434
435 if (to_buffer)
436 sg_flags |= SG_MITER_FROM_SG;
437 else
438 sg_flags |= SG_MITER_TO_SG;
429 439
430 sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); 440 sg_miter_start(&miter, sgl, nents, sg_flags);
431 441
432 local_irq_save(flags); 442 local_irq_save(flags);
433 443
@@ -438,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
438 448
439 if (to_buffer) 449 if (to_buffer)
440 memcpy(buf + offset, miter.addr, len); 450 memcpy(buf + offset, miter.addr, len);
441 else { 451 else
442 memcpy(miter.addr, buf + offset, len); 452 memcpy(miter.addr, buf + offset, len);
443 flush_kernel_dcache_page(miter.page);
444 }
445 453
446 offset += len; 454 offset += len;
447 } 455 }
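With this change every sg_miter_start() caller states the data direction, which is what lets sg_miter_stop() decide when flush_kernel_dcache_page() is needed. A hedged sketch of a sleepable-context caller that copies data out of a scatterlist (hypothetical helper):

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static size_t copy_from_sg(struct scatterlist *sgl, unsigned int nents,
                               char *buf, size_t buflen)
    {
            struct sg_mapping_iter miter;
            size_t off = 0;

            /* data flows out of the sg list, so SG_MITER_FROM_SG is the flag to use */
            sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);

            while (off < buflen && sg_miter_next(&miter)) {
                    size_t len = min(miter.length, buflen - off);

                    memcpy(buf + off, miter.addr, len);
                    off += len;
            }

            sg_miter_stop(&miter);
            return off;
    }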
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7d2ced..bffe6d7ef9d9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
60int swiotlb_force; 60int swiotlb_force;
61 61
62/* 62/*
63 * Used to do a quick range check in swiotlb_unmap_single and 63 * Used to do a quick range check in unmap_single and
64 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this 64 * sync_single_*, to see if the memory was in fact allocated by this
65 * API. 65 * API.
66 */ 66 */
67static char *io_tlb_start, *io_tlb_end; 67static char *io_tlb_start, *io_tlb_end;
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
129 return paddr; 129 return paddr;
130} 130}
131 131
132phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr) 132phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
133{ 133{
134 return baddr; 134 return baddr;
135} 135}
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
140 return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); 140 return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
141} 141}
142 142
143static void *swiotlb_bus_to_virt(dma_addr_t address) 143void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
144{ 144{
145 return phys_to_virt(swiotlb_bus_to_phys(address)); 145 return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
146}
147
148int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
149 dma_addr_t addr, size_t size)
150{
151 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
146} 152}
147 153
148int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) 154int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -309,10 +315,10 @@ cleanup1:
309 return -ENOMEM; 315 return -ENOMEM;
310} 316}
311 317
312static int 318static inline int
313address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) 319address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
314{ 320{
315 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); 321 return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
316} 322}
317 323
318static inline int range_needs_mapping(phys_addr_t paddr, size_t size) 324static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
341 unsigned long flags; 347 unsigned long flags;
342 348
343 while (size) { 349 while (size) {
344 sz = min(PAGE_SIZE - offset, size); 350 sz = min_t(size_t, PAGE_SIZE - offset, size);
345 351
346 local_irq_save(flags); 352 local_irq_save(flags);
347 buffer = kmap_atomic(pfn_to_page(pfn), 353 buffer = kmap_atomic(pfn_to_page(pfn),
@@ -476,7 +482,7 @@ found:
476 * dma_addr is the kernel virtual address of the bounce buffer to unmap. 482 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
477 */ 483 */
478static void 484static void
479unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) 485do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
480{ 486{
481 unsigned long flags; 487 unsigned long flags;
482 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 488 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
560 size)) { 566 size)) {
561 /* 567 /*
562 * The allocated memory isn't reachable by the device. 568 * The allocated memory isn't reachable by the device.
563 * Fall back on swiotlb_map_single().
564 */ 569 */
565 free_pages((unsigned long) ret, order); 570 free_pages((unsigned long) ret, order);
566 ret = NULL; 571 ret = NULL;
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
568 if (!ret) { 573 if (!ret) {
569 /* 574 /*
570 * We are either out of memory or the device can't DMA 575 * We are either out of memory or the device can't DMA
571 * to GFP_DMA memory; fall back on 576 * to GFP_DMA memory; fall back on map_single(), which
572 * swiotlb_map_single(), which will grab memory from 577 * will grab memory from the lowest available address range.
573 * the lowest available address range.
574 */ 578 */
575 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); 579 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
576 if (!ret) 580 if (!ret)
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
587 (unsigned long long)dev_addr); 591 (unsigned long long)dev_addr);
588 592
589 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 593 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
590 unmap_single(hwdev, ret, size, DMA_TO_DEVICE); 594 do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
591 return NULL; 595 return NULL;
592 } 596 }
593 *dma_handle = dev_addr; 597 *dma_handle = dev_addr;
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
604 free_pages((unsigned long) vaddr, get_order(size)); 608 free_pages((unsigned long) vaddr, get_order(size));
605 else 609 else
606 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 610 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
607 unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 611 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
608} 612}
609EXPORT_SYMBOL(swiotlb_free_coherent); 613EXPORT_SYMBOL(swiotlb_free_coherent);
610 614
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
634 * physical address to use is returned. 638 * physical address to use is returned.
635 * 639 *
636 * Once the device is given the dma address, the device owns this memory until 640 * Once the device is given the dma address, the device owns this memory until
637 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. 641 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
638 */ 642 */
639dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, 643dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
640 unsigned long offset, size_t size, 644 unsigned long offset, size_t size,
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
642 struct dma_attrs *attrs) 646 struct dma_attrs *attrs)
643{ 647{
644 phys_addr_t phys = page_to_phys(page) + offset; 648 phys_addr_t phys = page_to_phys(page) + offset;
645 void *ptr = page_address(page) + offset;
646 dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); 649 dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
647 void *map; 650 void *map;
648 651
649 BUG_ON(dir == DMA_NONE); 652 BUG_ON(dir == DMA_NONE);
650 /* 653 /*
651 * If the pointer passed in happens to be in the device's DMA window, 654 * If the address happens to be in the device's DMA window,
652 * we can safely return the device addr and not worry about bounce 655 * we can safely return the device addr and not worry about bounce
653 * buffering it. 656 * buffering it.
654 */ 657 */
655 if (!address_needs_mapping(dev, dev_addr, size) && 658 if (!address_needs_mapping(dev, dev_addr, size) &&
656 !range_needs_mapping(virt_to_phys(ptr), size)) 659 !range_needs_mapping(phys, size))
657 return dev_addr; 660 return dev_addr;
658 661
659 /* 662 /*
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
679 682
680/* 683/*
681 * Unmap a single streaming mode DMA translation. The dma_addr and size must 684 * Unmap a single streaming mode DMA translation. The dma_addr and size must
682 * match what was provided for in a previous swiotlb_map_single call. All 685 * match what was provided for in a previous swiotlb_map_page call. All
683 * other usages are undefined. 686 * other usages are undefined.
684 * 687 *
685 * After this call, reads by the cpu to the buffer are guaranteed to see 688 * After this call, reads by the cpu to the buffer are guaranteed to see
686 * whatever the device wrote there. 689 * whatever the device wrote there.
687 */ 690 */
691static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
692 size_t size, int dir)
693{
694 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
695
696 BUG_ON(dir == DMA_NONE);
697
698 if (is_swiotlb_buffer(dma_addr)) {
699 do_unmap_single(hwdev, dma_addr, size, dir);
700 return;
701 }
702
703 if (dir != DMA_FROM_DEVICE)
704 return;
705
706 dma_mark_clean(dma_addr, size);
707}
708
688void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 709void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
689 size_t size, enum dma_data_direction dir, 710 size_t size, enum dma_data_direction dir,
690 struct dma_attrs *attrs) 711 struct dma_attrs *attrs)
691{ 712{
692 char *dma_addr = swiotlb_bus_to_virt(dev_addr); 713 unmap_single(hwdev, dev_addr, size, dir);
693
694 BUG_ON(dir == DMA_NONE);
695 if (is_swiotlb_buffer(dma_addr))
696 unmap_single(hwdev, dma_addr, size, dir);
697 else if (dir == DMA_FROM_DEVICE)
698 dma_mark_clean(dma_addr, size);
699} 714}
700EXPORT_SYMBOL_GPL(swiotlb_unmap_page); 715EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
701 716
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
703 * Make physical memory consistent for a single streaming mode DMA translation 718 * Make physical memory consistent for a single streaming mode DMA translation
704 * after a transfer. 719 * after a transfer.
705 * 720 *
706 * If you perform a swiotlb_map_single() but wish to interrogate the buffer 721 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
707 * using the cpu, yet do not wish to teardown the dma mapping, you must 722 * using the cpu, yet do not wish to teardown the dma mapping, you must
708 * call this function before doing so. At the next point you give the dma 723 * call this function before doing so. At the next point you give the dma
709 * address back to the card, you must first perform a 724 * address back to the card, you must first perform a
@@ -713,13 +728,19 @@ static void
713swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 728swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
714 size_t size, int dir, int target) 729 size_t size, int dir, int target)
715{ 730{
716 char *dma_addr = swiotlb_bus_to_virt(dev_addr); 731 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
717 732
718 BUG_ON(dir == DMA_NONE); 733 BUG_ON(dir == DMA_NONE);
719 if (is_swiotlb_buffer(dma_addr)) 734
735 if (is_swiotlb_buffer(dma_addr)) {
720 sync_single(hwdev, dma_addr, size, dir, target); 736 sync_single(hwdev, dma_addr, size, dir, target);
721 else if (dir == DMA_FROM_DEVICE) 737 return;
722 dma_mark_clean(dma_addr, size); 738 }
739
740 if (dir != DMA_FROM_DEVICE)
741 return;
742
743 dma_mark_clean(dma_addr, size);
723} 744}
724 745
725void 746void
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
746 unsigned long offset, size_t size, 767 unsigned long offset, size_t size,
747 int dir, int target) 768 int dir, int target)
748{ 769{
749 char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset; 770 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
750
751 BUG_ON(dir == DMA_NONE);
752 if (is_swiotlb_buffer(dma_addr))
753 sync_single(hwdev, dma_addr, size, dir, target);
754 else if (dir == DMA_FROM_DEVICE)
755 dma_mark_clean(dma_addr, size);
756} 771}
757 772
758void 773void
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
777 792
778/* 793/*
779 * Map a set of buffers described by scatterlist in streaming mode for DMA. 794 * Map a set of buffers described by scatterlist in streaming mode for DMA.
780 * This is the scatter-gather version of the above swiotlb_map_single 795 * This is the scatter-gather version of the above swiotlb_map_page
781 * interface. Here the scatter gather list elements are each tagged with the 796 * interface. Here the scatter gather list elements are each tagged with the
782 * appropriate dma address and length. They are obtained via 797 * appropriate dma address and length. They are obtained via
783 * sg_dma_{address,length}(SG). 798 * sg_dma_{address,length}(SG).
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
788 * The routine returns the number of addr/length pairs actually 803 * The routine returns the number of addr/length pairs actually
789 * used, at most nents. 804 * used, at most nents.
790 * 805 *
791 * Device ownership issues as mentioned above for swiotlb_map_single are the 806 * Device ownership issues as mentioned above for swiotlb_map_page are the
792 * same here. 807 * same here.
793 */ 808 */
794int 809int
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
836 851
837/* 852/*
838 * Unmap a set of streaming mode DMA translations. Again, cpu read rules 853 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
839 * concerning calls here are the same as for swiotlb_unmap_single() above. 854 * concerning calls here are the same as for swiotlb_unmap_page() above.
840 */ 855 */
841void 856void
842swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 857swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
847 862
848 BUG_ON(dir == DMA_NONE); 863 BUG_ON(dir == DMA_NONE);
849 864
850 for_each_sg(sgl, sg, nelems, i) { 865 for_each_sg(sgl, sg, nelems, i)
851 if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) 866 unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
852 unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), 867
853 sg->dma_length, dir);
854 else if (dir == DMA_FROM_DEVICE)
855 dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
856 }
857} 868}
858EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); 869EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
859 870
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
879 struct scatterlist *sg; 890 struct scatterlist *sg;
880 int i; 891 int i;
881 892
882 BUG_ON(dir == DMA_NONE); 893 for_each_sg(sgl, sg, nelems, i)
883 894 swiotlb_sync_single(hwdev, sg->dma_address,
884 for_each_sg(sgl, sg, nelems, i) {
885 if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
886 sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
887 sg->dma_length, dir, target); 895 sg->dma_length, dir, target);
888 else if (dir == DMA_FROM_DEVICE)
889 dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
890 }
891} 896}
892 897
893void 898void
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7536acea135b..756ccafa9cec 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -408,6 +408,8 @@ enum format_type {
408 FORMAT_TYPE_LONG_LONG, 408 FORMAT_TYPE_LONG_LONG,
409 FORMAT_TYPE_ULONG, 409 FORMAT_TYPE_ULONG,
410 FORMAT_TYPE_LONG, 410 FORMAT_TYPE_LONG,
411 FORMAT_TYPE_UBYTE,
412 FORMAT_TYPE_BYTE,
411 FORMAT_TYPE_USHORT, 413 FORMAT_TYPE_USHORT,
412 FORMAT_TYPE_SHORT, 414 FORMAT_TYPE_SHORT,
413 FORMAT_TYPE_UINT, 415 FORMAT_TYPE_UINT,
@@ -573,12 +575,15 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
573} 575}
574 576
575static char *symbol_string(char *buf, char *end, void *ptr, 577static char *symbol_string(char *buf, char *end, void *ptr,
576 struct printf_spec spec) 578 struct printf_spec spec, char ext)
577{ 579{
578 unsigned long value = (unsigned long) ptr; 580 unsigned long value = (unsigned long) ptr;
579#ifdef CONFIG_KALLSYMS 581#ifdef CONFIG_KALLSYMS
580 char sym[KSYM_SYMBOL_LEN]; 582 char sym[KSYM_SYMBOL_LEN];
581 sprint_symbol(sym, value); 583 if (ext != 'f')
584 sprint_symbol(sym, value);
585 else
586 kallsyms_lookup(value, NULL, NULL, NULL, sym);
582 return string(buf, end, sym, spec); 587 return string(buf, end, sym, spec);
583#else 588#else
584 spec.field_width = 2*sizeof(void *); 589 spec.field_width = 2*sizeof(void *);
@@ -690,7 +695,8 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
690 * 695 *
691 * Right now we handle: 696 * Right now we handle:
692 * 697 *
693 * - 'F' For symbolic function descriptor pointers 698 * - 'F' For symbolic function descriptor pointers with offset
699 * - 'f' For simple symbolic function names without offset
694 * - 'S' For symbolic direct pointers 700 * - 'S' For symbolic direct pointers
695 * - 'R' For a struct resource pointer, it prints the range of 701 * - 'R' For a struct resource pointer, it prints the range of
696 * addresses (not the name nor the flags) 702 * addresses (not the name nor the flags)
@@ -713,10 +719,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
713 719
714 switch (*fmt) { 720 switch (*fmt) {
715 case 'F': 721 case 'F':
722 case 'f':
716 ptr = dereference_function_descriptor(ptr); 723 ptr = dereference_function_descriptor(ptr);
717 /* Fallthrough */ 724 /* Fallthrough */
718 case 'S': 725 case 'S':
719 return symbol_string(buf, end, ptr, spec); 726 return symbol_string(buf, end, ptr, spec, *fmt);
720 case 'R': 727 case 'R':
721 return resource_string(buf, end, ptr, spec); 728 return resource_string(buf, end, ptr, spec);
722 case 'm': 729 case 'm':
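The new 'f' case reuses the 'F' path but tells symbol_string() to drop the offset. A short hedged example (my_handler is a placeholder function):

    #include <linux/kernel.h>

    static void my_handler(void) { }

    static void show_handler(void)
    {
            /* %pF keeps the offset, as before; %pf prints only the symbol name */
            printk(KERN_DEBUG "handler: %pF\n", my_handler);  /* e.g. "my_handler+0x0/0x24" */
            printk(KERN_DEBUG "handler: %pf\n", my_handler);  /* e.g. "my_handler" */
    }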
@@ -853,11 +860,15 @@ qualifier:
853 spec->qualifier = -1; 860 spec->qualifier = -1;
854 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 861 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
855 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { 862 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
856 spec->qualifier = *fmt; 863 spec->qualifier = *fmt++;
857 ++fmt; 864 if (unlikely(spec->qualifier == *fmt)) {
858 if (spec->qualifier == 'l' && *fmt == 'l') { 865 if (spec->qualifier == 'l') {
859 spec->qualifier = 'L'; 866 spec->qualifier = 'L';
860 ++fmt; 867 ++fmt;
868 } else if (spec->qualifier == 'h') {
869 spec->qualifier = 'H';
870 ++fmt;
871 }
861 } 872 }
862 } 873 }
863 874
@@ -919,6 +930,11 @@ qualifier:
919 spec->type = FORMAT_TYPE_SIZE_T; 930 spec->type = FORMAT_TYPE_SIZE_T;
920 } else if (spec->qualifier == 't') { 931 } else if (spec->qualifier == 't') {
921 spec->type = FORMAT_TYPE_PTRDIFF; 932 spec->type = FORMAT_TYPE_PTRDIFF;
933 } else if (spec->qualifier == 'H') {
934 if (spec->flags & SIGN)
935 spec->type = FORMAT_TYPE_BYTE;
936 else
937 spec->type = FORMAT_TYPE_UBYTE;
922 } else if (spec->qualifier == 'h') { 938 } else if (spec->qualifier == 'h') {
923 if (spec->flags & SIGN) 939 if (spec->flags & SIGN)
924 spec->type = FORMAT_TYPE_SHORT; 940 spec->type = FORMAT_TYPE_SHORT;
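The doubled 'h' now selects the byte-sized conversions via the internal 'H' qualifier; a hedged example of the resulting printk usage:

    #include <linux/kernel.h>

    static void byte_format_example(void)
    {
            signed char delta = -12;
            unsigned char status = 0xa5;

            /* 'hh' maps to FORMAT_TYPE_BYTE/UBYTE, so this prints "delta=-12 status=165" */
            printk(KERN_DEBUG "delta=%hhd status=%hhu\n", delta, status);
    }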
@@ -943,7 +959,8 @@ qualifier:
943 * 959 *
944 * This function follows C99 vsnprintf, but has some extensions: 960 * This function follows C99 vsnprintf, but has some extensions:
945 * %pS output the name of a text symbol 961 * %pS output the name of a text symbol
946 * %pF output the name of a function pointer 962 * %pF output the name of a function pointer with its offset
963 * %pf output the name of a function pointer without its offset
947 * %pR output the address range in a struct resource 964 * %pR output the address range in a struct resource
948 * 965 *
949 * The return value is the number of characters which would 966 * The return value is the number of characters which would
@@ -1087,6 +1104,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1087 case FORMAT_TYPE_PTRDIFF: 1104 case FORMAT_TYPE_PTRDIFF:
1088 num = va_arg(args, ptrdiff_t); 1105 num = va_arg(args, ptrdiff_t);
1089 break; 1106 break;
1107 case FORMAT_TYPE_UBYTE:
1108 num = (unsigned char) va_arg(args, int);
1109 break;
1110 case FORMAT_TYPE_BYTE:
1111 num = (signed char) va_arg(args, int);
1112 break;
1090 case FORMAT_TYPE_USHORT: 1113 case FORMAT_TYPE_USHORT:
1091 num = (unsigned short) va_arg(args, int); 1114 num = (unsigned short) va_arg(args, int);
1092 break; 1115 break;
@@ -1363,6 +1386,10 @@ do { \
1363 case FORMAT_TYPE_PTRDIFF: 1386 case FORMAT_TYPE_PTRDIFF:
1364 save_arg(ptrdiff_t); 1387 save_arg(ptrdiff_t);
1365 break; 1388 break;
1389 case FORMAT_TYPE_UBYTE:
1390 case FORMAT_TYPE_BYTE:
1391 save_arg(char);
1392 break;
1366 case FORMAT_TYPE_USHORT: 1393 case FORMAT_TYPE_USHORT:
1367 case FORMAT_TYPE_SHORT: 1394 case FORMAT_TYPE_SHORT:
1368 save_arg(short); 1395 save_arg(short);
@@ -1391,7 +1418,8 @@ EXPORT_SYMBOL_GPL(vbin_printf);
1391 * 1418 *
1392 * The format follows C99 vsnprintf, but has some extensions: 1419 * The format follows C99 vsnprintf, but has some extensions:
1393 * %pS output the name of a text symbol 1420 * %pS output the name of a text symbol
1394 * %pF output the name of a function pointer 1421 * %pF output the name of a function pointer with its offset
1422 * %pf output the name of a function pointer without its offset
1395 * %pR output the address range in a struct resource 1423 * %pR output the address range in a struct resource
1396 * %n is ignored 1424 * %n is ignored
1397 * 1425 *
@@ -1538,6 +1566,12 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1538 case FORMAT_TYPE_PTRDIFF: 1566 case FORMAT_TYPE_PTRDIFF:
1539 num = get_arg(ptrdiff_t); 1567 num = get_arg(ptrdiff_t);
1540 break; 1568 break;
1569 case FORMAT_TYPE_UBYTE:
1570 num = get_arg(unsigned char);
1571 break;
1572 case FORMAT_TYPE_BYTE:
1573 num = get_arg(signed char);
1574 break;
1541 case FORMAT_TYPE_USHORT: 1575 case FORMAT_TYPE_USHORT:
1542 num = get_arg(unsigned short); 1576 num = get_arg(unsigned short);
1543 break; 1577 break;