Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig           |   9
-rw-r--r--  lib/Kconfig.debug     |  47
-rw-r--r--  lib/Kconfig.kmemcheck |  91
-rw-r--r--  lib/Makefile          |   9
-rw-r--r--  lib/atomic64.c        | 175
-rw-r--r--  lib/checksum.c        | 193
-rw-r--r--  lib/cpumask.c         |  23
-rw-r--r--  lib/dec_and_lock.c    |   3
-rw-r--r--  lib/dma-debug.c       | 496
-rw-r--r--  lib/extable.c         |  21
-rw-r--r--  lib/gcd.c             |  18
-rw-r--r--  lib/genalloc.c        |   1
-rw-r--r--  lib/hexdump.c         |  15
-rw-r--r--  lib/kobject.c         |   7
-rw-r--r--  lib/radix-tree.c      | 110
-rw-r--r--  lib/rational.c        |  62
-rw-r--r--  lib/rbtree.c          |  34
-rw-r--r--  lib/scatterlist.c     |   9
-rw-r--r--  lib/swiotlb.c         | 119
-rw-r--r--  lib/vsprintf.c        |  56
20 files changed, 1255 insertions, 243 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 8ade0a7a91e0..bb1326d3839c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,9 @@ menu "Library routines"
 config BITREVERSE
 	tristate
 
+config RATIONAL
+	boolean
+
 config GENERIC_FIND_FIRST_BIT
 	bool
 
@@ -191,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 config NLATTR
 	bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c6e854f215fa..23067ab1a73c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -336,6 +336,38 @@ config SLUB_STATS
 	  out which slabs are relevant to a particular load.
 	  Try running: slabinfo -DA
 
+config DEBUG_KMEMLEAK
+	bool "Kernel memory leak detector"
+	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
+		!MEMORY_HOTPLUG
+	select DEBUG_SLAB if SLAB
+	select SLUB_DEBUG if SLUB
+	select DEBUG_FS if SYSFS
+	select STACKTRACE if STACKTRACE_SUPPORT
+	select KALLSYMS
+	help
+	  Say Y here if you want to enable the memory leak
+	  detector. The memory allocation/freeing is traced in a way
+	  similar to Boehm's conservative garbage collector, the
+	  difference being that the orphan objects are not freed but
+	  only shown in /sys/kernel/debug/kmemleak. Enabling this
+	  feature will introduce an overhead to memory
+	  allocations. See Documentation/kmemleak.txt for more
+	  details.
+
+	  In order to access the kmemleak file, debugfs needs to be
+	  mounted (usually at /sys/kernel/debug).
+
+config DEBUG_KMEMLEAK_TEST
+	tristate "Simple test for the kernel memory leak detector"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y or M here to build a test for the kernel memory leak
+	  detector. This option enables a module that explicitly leaks
+	  memory.
+
+	  If unsure, say N.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
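A note on DEBUG_KMEMLEAK_TEST above: all a leak-test module has to do is allocate memory and drop every reference to it. A minimal sketch of such a module follows (hypothetical names, not the in-tree test module):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	/* Deliberately leak a few allocations so kmemleak has something to
	 * find; the returned pointers are dropped on the floor. After
	 * loading, "echo scan > /sys/kernel/debug/kmemleak" followed by a
	 * read of the same file should list them as unreferenced objects. */
	static int __init leak_test_init(void)
	{
		int i;

		for (i = 0; i < 16; i++)
			kmalloc(64, GFP_KERNEL);	/* reference intentionally lost */

		return 0;
	}

	static void __exit leak_test_exit(void)
	{
	}

	module_init(leak_test_init);
	module_exit(leak_test_exit);
	MODULE_LICENSE("GPL");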
@@ -440,7 +472,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -620,7 +652,7 @@ config ARCH_WANT_FRAME_POINTERS
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
+		(CRIS || M68K || M68KNOMMU || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
@@ -809,13 +841,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC
+	select FRAME_POINTER if !PPC && !S390
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC
+	select FRAME_POINTER if !MIPS && !PPC && !S390
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
@@ -891,7 +923,6 @@ config DYNAMIC_DEBUG
 	default n
 	depends on PRINTK
 	depends on DEBUG_FS
-	select PRINTK_DEBUG
 	help
 
 	  Compiles debug level messages into the kernel, which would not
@@ -965,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 000000000000..603c81b66549
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+	bool
+
+menuconfig KMEMCHECK
+	bool "kmemcheck: trap use of uninitialized memory"
+	depends on DEBUG_KERNEL
+	depends on !X86_USE_3DNOW
+	depends on SLUB || SLAB
+	depends on !CC_OPTIMIZE_FOR_SIZE
+	depends on !FUNCTION_TRACER
+	select FRAME_POINTER
+	select STACKTRACE
+	default n
+	help
+	  This option enables tracing of dynamically allocated kernel memory
+	  to see if memory is used before it has been given an initial value.
+	  Be aware that this requires half of your memory for bookkeeping and
+	  will insert extra code at *every* read and write to tracked memory,
+	  thus slowing down the kernel code (but user code is unaffected).
+
+	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+	  or enable kmemcheck at boot-time. If the kernel is started with
+	  kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+	prompt "kmemcheck: default mode at boot"
+	depends on KMEMCHECK
+	default KMEMCHECK_ONESHOT_BY_DEFAULT
+	help
+	  This option controls the default behaviour of kmemcheck when the
+	  kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+	bool "disabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+	bool "enabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+	bool "one-shot"
+	depends on KMEMCHECK
+	help
+	  In one-shot mode, only the first error detected is reported before
+	  kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+	int "kmemcheck: error queue size"
+	depends on KMEMCHECK
+	default 64
+	help
+	  Select the maximum number of errors to store in the queue. Since
+	  errors can occur virtually anywhere and in any context, we need a
+	  temporary storage area which is guaranteed not to generate any
+	  other faults. The queue will be emptied as soon as a tasklet may
+	  be scheduled. If the queue is full, new error reports will be
+	  lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+	depends on KMEMCHECK
+	range 2 8
+	default 5
+	help
+	  Select the number of shadow bytes to save along with each entry of
+	  the queue. These bytes indicate what parts of an allocation are
+	  initialized, uninitialized, etc. and will be displayed when an
+	  error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+	bool "kmemcheck: allow partially uninitialized memory"
+	depends on KMEMCHECK
+	default y
+	help
+	  This option works around certain GCC optimizations that produce
+	  32-bit reads from 16-bit variables where the upper 16 bits are
+	  thrown away afterwards. This may of course also hide some real
+	  bugs.
+
+config KMEMCHECK_BITOPS_OK
+	bool "kmemcheck: allow bit-field manipulation"
+	depends on KMEMCHECK
+	default n
+	help
+	  This option silences warnings that would be generated for bit-field
+	  accesses where not all the bits are initialized at the same time.
+	  This may also hide some real bugs.
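To make the kmemcheck options above concrete, here is the kind of read the tracer traps, as a contrived sketch (hypothetical struct and function; assumes the allocation comes from a tracked SLAB/SLUB cache):

	#include <linux/slab.h>

	struct conn {
		int state;
		int flags;
	};

	static int conn_is_ready(void)
	{
		struct conn *c = kmalloc(sizeof(*c), GFP_KERNEL);
		int ready;

		if (!c)
			return 0;
		c->state = 1;
		/* c->flags was never initialized: kmemcheck traps this read
		 * and queues a report (up to KMEMCHECK_QUEUE_SIZE of them),
		 * including the shadow bytes around the offending address. */
		ready = c->state && c->flags;
		kfree(c);
		return ready;
	}

KMEMCHECK_PARTIAL_OK exists because GCC sometimes widens such loads itself, e.g. reading 32 bits from a 16-bit field and discarding the upper half.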
diff --git a/lib/Makefile b/lib/Makefile
index d6edd6753f40..b6d1857bbf08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o
+	 string_helpers.o gcd.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -38,7 +38,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
@@ -50,6 +50,7 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
 endif
 
 obj-$(CONFIG_BITREVERSE) += bitrev.o
+obj-$(CONFIG_RATIONAL)	+= rational.o
 obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
 obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -92,6 +93,10 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
+obj-$(CONFIG_GENERIC_CSUM) += checksum.o
+
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000000..c5e725562416
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable.  Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS	16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+	spinlock_t lock;
+	char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+	unsigned long addr = (unsigned long) v;
+
+	addr >>= L1_CACHE_SHIFT;
+	addr ^= (addr >> 8) ^ (addr >> 16);
+	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter = i;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter - 1;
+	if (val >= 0)
+		v->counter = val;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	if (val == o)
+		v->counter = n;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	int ret = 1;
+
+	spin_lock_irqsave(lock, flags);
+	if (v->counter != u) {
+		v->counter += a;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < NR_LOCKS; ++i)
+		spin_lock_init(&atomic64_lock[i].lock);
+	return 0;
+}
+
+pure_initcall(init_atomic64_lock);
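The hashing in lock_addr() above is what makes this scheme work: the lock index depends only on the address of the atomic64_t, so every operation on a given variable always takes the same spinlock and operations on it serialize correctly. A small user-space sketch of the same index computation (L1_CACHE_SHIFT assumed to be 6, i.e. 64-byte cache lines):

	#include <stdio.h>
	#include <stdint.h>

	#define NR_LOCKS	16
	#define L1_CACHE_SHIFT	6	/* assumed: 64-byte cache lines */

	/* mirror of lock_addr(): hash a variable's address down to one of
	 * the NR_LOCKS lock buckets */
	static unsigned int lock_index(const void *v)
	{
		uintptr_t addr = (uintptr_t)v;

		addr >>= L1_CACHE_SHIFT;
		addr ^= (addr >> 8) ^ (addr >> 16);
		return addr & (NR_LOCKS - 1);
	}

	int main(void)
	{
		long long counters[64];	/* spans several 64-byte cache lines */
		int i;

		/* a given counter always hashes to the same bucket; counters
		 * in the same cache line share a lock by construction, and
		 * counters in different lines usually spread across buckets */
		for (i = 0; i < 64; i += 8)
			printf("&counters[%2d] -> lock %u\n",
			       i, lock_index(&counters[i]));
		return 0;
	}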
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 000000000000..12e5a1c91cda
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,193 @@
+/*
+ *
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the  BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		IP/TCP/UDP checksumming routines
+ *
+ * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
+ *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *		Tom May, <ftom@netcom.com>
+ *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *		Fixed some nasty bugs, causing some horrible crashes.
+ *		A: At some points, the sum (%0) was used as
+ *		length-counter instead of the length counter
+ *		(%1). Thanks to Roman Hodek for pointing this out.
+ *		B: GCC seems to mess up if one uses too many
+ *		data-registers to hold input values and one tries to
+ *		specify d0 and d1 as scratch registers. Letting gcc
+ *		choose these registers itself solves the problem.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
+ kills, so most of the assembly has to go. */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short from32to16(unsigned long x)
+{
+	/* add up 16-bit and 16-bit for 16+c bit */
+	x = (x & 0xffff) + (x >> 16);
+	/* add up carry.. */
+	x = (x & 0xffff) + (x >> 16);
+	return x;
+}
+
+static unsigned int do_csum(const unsigned char *buff, int len)
+{
+	int odd, count;
+	unsigned long result = 0;
+
+	if (len <= 0)
+		goto out;
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		result = *buff;
+		len--;
+		buff++;
+	}
+	count = len >> 1;		/* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1;		/* nr of 32-bit words.. */
+		if (count) {
+			unsigned long carry = 0;
+			do {
+				unsigned long w = *(unsigned long *) buff;
+				count--;
+				buff += 4;
+				result += carry;
+				result += w;
+				carry = (w > result);
+			} while (count);
+			result += carry;
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += (*buff << 8);
+	result = from32to16(result);
+	if (odd)
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+	return result;
+}
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	return (__force __sum16)~do_csum(iph, ihl*4);
+}
+EXPORT_SYMBOL(ip_fast_csum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum wsum)
+{
+	unsigned int sum = (__force unsigned int)wsum;
+	unsigned int result = do_csum(buff, len);
+
+	/* add in old sum, and carry.. */
+	result += sum;
+	if (sum > result)
+		result += 1;
+	return (__force __wsum)result;
+}
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+__sum16 ip_compute_csum(const void *buff, int len)
+{
+	return (__force __sum16)~do_csum(buff, len);
+}
+EXPORT_SYMBOL(ip_compute_csum);
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			    __wsum sum, int *csum_err)
+{
+	int missing;
+
+	missing = __copy_from_user(dst, src, len);
+	if (missing) {
+		memset(dst + len - missing, 0, missing);
+		*csum_err = -EFAULT;
+	} else
+		*csum_err = 0;
+
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+{
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy);
+
+#ifndef csum_tcpudp_nofold
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+			unsigned short len,
+			unsigned short proto,
+			__wsum sum)
+{
+	unsigned long long s = (__force u32)sum;
+
+	s += (__force u32)saddr;
+	s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+	s += proto + len;
+#else
+	s += (proto + len) << 8;
+#endif
+	s += (s >> 32);
+	return (__force __wsum)s;
+}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
+#endif
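The folding in from32to16() and do_csum() is the classic Internet checksum (RFC 1071): sum 16-bit words into a wider accumulator, fold the carries back in, and complement at the end, which is exactly what ip_compute_csum() returns. A self-contained user-space sketch of the same arithmetic (words taken big-endian for readability; the one's-complement sum is byte-order independent up to a final byte swap):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/* 16-bit one's-complement sum over a buffer, then the final
	 * inversion that ip_compute_csum() applies */
	static uint16_t csum16(const uint8_t *buf, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;	/* odd trailing byte */

		while (sum >> 16)		/* fold the carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* sample 20-byte IPv4 header, checksum field (bytes 10-11) zeroed */
		uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
				    0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
				    0xac, 0x10, 0x0a, 0x0c };

		/* should print 0xb1e6 for this sample header */
		printf("checksum: 0x%04x\n", csum16(iph, sizeof(iph)));
		return 0;
	}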
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 1f71b97de0f9..7bb4142a502f 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
  */
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
-	if (likely(slab_is_available()))
-		*mask = kmalloc_node(cpumask_size(), flags, node);
-	else {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-		printk(KERN_ERR
-			"=> alloc_cpumask_var: kmalloc not available!\n");
-#endif
-		*mask = NULL;
-	}
+	*mask = kmalloc_node(cpumask_size(), flags, node);
+
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 	if (!*mask) {
 		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
@@ -119,6 +112,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(alloc_cpumask_var_node);
 
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var_node);
+
 /**
  * alloc_cpumask_var - allocate a struct cpumask
  * @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -135,6 +134,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpumask_var);
 
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var);
+
 /**
  * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
  * @mask: pointer to cpumask_var_t where the cpumask is returned
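The new zalloc variants simply OR __GFP_ZERO into the flags so the returned mask starts out all-clear. A usage sketch (hypothetical caller):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	/* hypothetical helper: build a mask of the even-numbered online CPUs */
	static int pick_even_cpus(cpumask_var_t *out)
	{
		int cpu;

		/* zalloc_cpumask_var() hands back a zeroed mask, so no
		 * explicit cpumask_clear() is needed before setting bits */
		if (!zalloc_cpumask_var(out, GFP_KERNEL))
			return -ENOMEM;

		for_each_online_cpu(cpu)
			if ((cpu & 1) == 0)
				cpumask_set_cpu(cpu, *out);

		return 0;
	}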
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c31455541..e73822aa6e9a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
 	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
 	if (atomic_add_unless(atomic, -1, 1))
 		return 0;
-#endif
+
 	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
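With the CONFIG_SMP guard gone, the atomic_add_unless() fast path is taken on UP kernels too, so the spinlock is only touched when the count might actually drop to zero. The usual calling pattern, as a sketch (hypothetical object type):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	struct object {
		atomic_t refcount;
		struct list_head node;
	};

	static DEFINE_SPINLOCK(obj_list_lock);	/* protects the list 'node' is on */

	/* drop a reference; only the final put acquires the lock */
	static void object_put(struct object *obj)
	{
		if (!atomic_dec_and_lock(&obj->refcount, &obj_list_lock))
			return;		/* count was > 1: lock never taken */

		/* last reference: unlink under the lock, then free */
		list_del(&obj->node);
		spin_unlock(&obj_list_lock);
		kfree(obj);
	}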
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d3da7edc034f..3b93129a968c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -23,9 +23,11 @@
 #include <linux/dma-debug.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
@@ -85,6 +87,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -97,6 +100,16 @@ static struct dentry *show_all_errors_dent  __read_mostly;
 static struct dentry *show_num_errors_dent  __read_mostly;
 static struct dentry *num_free_entries_dent __read_mostly;
 static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent           __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN	64
+
+static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver                    __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
 
 static const char *type2name[4] = { "single", "page",
 				    "scather-gather", "coherent" };
@@ -104,6 +117,11 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
+/* little merge helper - remove it after the merge window */
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
+
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -121,15 +139,54 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 {
 #ifdef CONFIG_STACKTRACE
 	if (entry) {
-		printk(KERN_WARNING "Mapped at:\n");
+		pr_warning("Mapped at:\n");
 		print_stack_trace(&entry->stacktrace, 0);
 	}
 #endif
 }
 
+static bool driver_filter(struct device *dev)
+{
+	struct device_driver *drv;
+	unsigned long flags;
+	bool ret;
+
+	/* driver filter off */
+	if (likely(!current_driver_name[0]))
+		return true;
+
+	/* driver filter on and initialized */
+	if (current_driver && dev->driver == current_driver)
+		return true;
+
+	if (current_driver || !current_driver_name[0])
+		return false;
+
+	/* driver filter on but not yet initialized */
+	drv = get_driver(dev->driver);
+	if (!drv)
+		return false;
+
+	/* lock to protect against change of current_driver_name */
+	read_lock_irqsave(&driver_name_lock, flags);
+
+	ret = false;
+	if (drv->name &&
+	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+		current_driver = drv;
+		ret = true;
+	}
+
+	read_unlock_irqrestore(&driver_name_lock, flags);
+	put_driver(drv);
+
+	return ret;
+}
+
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
-		if (show_all_errors || show_num_errors > 0) {	\
+		if (driver_filter(dev) &&			\
+		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
 			     dev_driver_string(dev),		\
 			     dev_name(dev) , ## arg);		\
@@ -185,15 +242,51 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 						struct dma_debug_entry *ref)
 {
-	struct dma_debug_entry *entry;
+	struct dma_debug_entry *entry, *ret = NULL;
+	int matches = 0, match_lvl, last_lvl = 0;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr == ref->dev_addr) &&
-		    (entry->dev == ref->dev))
+		if ((entry->dev_addr != ref->dev_addr) ||
+		    (entry->dev != ref->dev))
+			continue;
+
+		/*
+		 * Some drivers map the same physical address multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which returns the entry from
+		 * the hash which fits best to the reference value
+		 * instead of the first-fit.
+		 */
+		matches += 1;
+		match_lvl = 0;
+		entry->size         == ref->size         ? ++match_lvl : 0;
+		entry->type         == ref->type         ? ++match_lvl : 0;
+		entry->direction    == ref->direction    ? ++match_lvl : 0;
+		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
+
+		if (match_lvl == 4) {
+			/* perfect-fit - return the result */
 			return entry;
+		} else if (match_lvl > last_lvl) {
+			/*
+			 * We found an entry that fits better than the
+			 * previous one
+			 */
+			last_lvl = match_lvl;
+			ret      = entry;
+		}
 	}
 
-	return NULL;
+	/*
+	 * If we have multiple matches but no perfect-fit, just return
+	 * NULL.
+	 */
+	ret = (matches == 1) ? ret : NULL;
+
+	return ret;
 }
 
 /*
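The best-fit logic above is worth restating: dev and dev_addr select the candidates, and four secondary fields are scored so that a perfect fit wins immediately, a unique candidate wins with any score, and an ambiguous multi-match returns NULL. A compact sketch of that rule (simplified types, not the kernel structures):

	#include <stddef.h>

	struct ref {
		unsigned long long size;
		int type, direction, sg_call_ents;
	};

	/* one point per matching secondary field; 4 means a perfect fit */
	static int match_level(const struct ref *e, const struct ref *r)
	{
		return (e->size         == r->size) +
		       (e->type         == r->type) +
		       (e->direction    == r->direction) +
		       (e->sg_call_ents == r->sg_call_ents);
	}

	/* pick the best-scoring candidate; with several candidates and no
	 * perfect fit the answer is ambiguous, so report "not found" */
	static struct ref *best_fit(struct ref **cand, size_t n,
				    const struct ref *r)
	{
		struct ref *best = NULL;
		int lvl, best_lvl = -1;
		size_t i;

		for (i = 0; i < n; i++) {
			lvl = match_level(cand[i], r);
			if (lvl == 4)
				return cand[i];	/* perfect fit */
			if (lvl > best_lvl) {
				best_lvl = lvl;
				best = cand[i];
			}
		}
		return n == 1 ? best : NULL;
	}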
@@ -257,6 +350,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+	struct dma_debug_entry *entry;
+
+	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+	list_del(&entry->list);
+	memset(entry, 0, sizeof(*entry));
+
+	num_free_entries -= 1;
+	if (num_free_entries < min_free_entries)
+		min_free_entries = num_free_entries;
+
+	return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
@@ -270,15 +378,12 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	spin_lock_irqsave(&free_entries_lock, flags);
 
 	if (list_empty(&free_entries)) {
-		printk(KERN_ERR "DMA-API: debugging out of memory "
-				"- disabling\n");
+		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
 		goto out;
 	}
 
-	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-	list_del(&entry->list);
-	memset(entry, 0, sizeof(*entry));
+	entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +391,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	entry->stacktrace.skip = 2;
 	save_stack_trace(&entry->stacktrace);
 #endif
-	num_free_entries -= 1;
-	if (num_free_entries < min_free_entries)
-		min_free_entries = num_free_entries;
 
 out:
 	spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +412,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+	int i, delta, ret = 0;
+	unsigned long flags;
+	struct dma_debug_entry *entry;
+	LIST_HEAD(tmp);
+
+	spin_lock_irqsave(&free_entries_lock, flags);
+
+	if (nr_total_entries < num_entries) {
+		delta = num_entries - nr_total_entries;
+
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+
+		for (i = 0; i < delta; i++) {
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry)
+				break;
+
+			list_add_tail(&entry->list, &tmp);
+		}
+
+		spin_lock_irqsave(&free_entries_lock, flags);
+
+		list_splice(&tmp, &free_entries);
+		nr_total_entries += i;
+		num_free_entries += i;
+	} else {
+		delta = nr_total_entries - num_entries;
+
+		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+			entry = __dma_entry_alloc();
+			kfree(entry);
+		}
+
+		nr_total_entries -= i;
+	}
+
+	if (nr_total_entries != num_entries)
+		ret = 1;
+
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
@@ -334,8 +483,7 @@ static int prealloc_memory(u32 num_entries)
 	num_free_entries = num_entries;
 	min_free_entries = num_entries;
 
-	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
-			num_entries);
+	pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
 
 	return 0;
 
@@ -349,11 +497,102 @@ out_err:
 	return -ENOMEM;
 }
 
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN + 1];
+	unsigned long flags;
+	int len;
+
+	if (!current_driver_name[0])
+		return 0;
+
+	/*
+	 * We can't copy to userspace directly because current_driver_name can
+	 * only be read under the driver_name_lock with irqs disabled. So
+	 * create a temporary copy first.
+	 */
+	read_lock_irqsave(&driver_name_lock, flags);
+	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+	read_unlock_irqrestore(&driver_name_lock, flags);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+			    size_t count, loff_t *ppos)
+{
+	char buf[NAME_MAX_LEN];
+	unsigned long flags;
+	size_t len;
+	int i;
+
+	/*
+	 * We can't copy from userspace directly. Access to
+	 * current_driver_name is protected with a write_lock with irqs
+	 * disabled. Since copy_from_user can fault and may sleep we
+	 * need to copy to temporary buffer first
+	 */
+	len = min(count, (size_t)(NAME_MAX_LEN - 1));
+	if (copy_from_user(buf, userbuf, len))
+		return -EFAULT;
+
+	buf[len] = 0;
+
+	write_lock_irqsave(&driver_name_lock, flags);
+
+	/*
+	 * Now handle the string we got from userspace very carefully.
+	 * The rules are:
+	 * - only use the first token we got
+	 * - token delimiter is everything looking like a space
+	 *   character (' ', '\n', '\t' ...)
+	 *
+	 */
+	if (!isalnum(buf[0])) {
+		/*
+		 * If the first character userspace gave us is not
+		 * alphanumerical then assume the filter should be
+		 * switched off.
+		 */
+		if (current_driver_name[0])
+			pr_info("DMA-API: switching off dma-debug driver filter\n");
+		current_driver_name[0] = 0;
+		current_driver = NULL;
+		goto out_unlock;
+	}
+
+	/*
+	 * Now parse out the first token and use it as the name for the
+	 * driver to filter for.
+	 */
+	for (i = 0; i < NAME_MAX_LEN; ++i) {
+		current_driver_name[i] = buf[i];
+		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+			break;
+	}
+	current_driver_name[i] = 0;
+	current_driver = NULL;
+
+	pr_info("DMA-API: enable driver filter for driver [%s]\n",
+		current_driver_name);
+
+out_unlock:
+	write_unlock_irqrestore(&driver_name_lock, flags);
+
+	return count;
+}
+
+const struct file_operations filter_fops = {
+	.read  = filter_read,
+	.write = filter_write,
+};
+
 static int dma_debug_fs_init(void)
 {
 	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
 	if (!dma_debug_dent) {
-		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
+		pr_err("DMA-API: can not create debugfs directory\n");
 		return -ENOMEM;
 	}
 
@@ -392,6 +631,11 @@ static int dma_debug_fs_init(void)
 	if (!min_free_entries_dent)
 		goto out_err;
 
+	filter_dent = debugfs_create_file("driver_filter", 0644,
+					  dma_debug_dent, NULL, &filter_fops);
+	if (!filter_dent)
+		goto out_err;
+
 	return 0;
 
 out_err:
@@ -406,15 +650,19 @@ static int device_dma_allocations(struct device *dev)
 	unsigned long flags;
 	int count = 0, i;
 
+	local_irq_save(flags);
+
 	for (i = 0; i < HASH_SIZE; ++i) {
-		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
+		spin_lock(&dma_entry_hash[i].lock);
 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
 			if (entry->dev == dev)
 				count += 1;
 		}
-		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
+		spin_unlock(&dma_entry_hash[i].lock);
 	}
 
+	local_irq_restore(flags);
+
 	return count;
 }
 
@@ -426,7 +674,7 @@ static int dma_debug_device_change(struct notifier_block *nb,
 
 
 	switch (action) {
-	case BUS_NOTIFY_UNBIND_DRIVER:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
 		count = device_dma_allocations(dev);
 		if (count == 0)
 			break;
@@ -447,7 +695,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
 	if (nb == NULL) {
-		printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
+		pr_err("dma_debug_add_bus: out of memory\n");
 		return;
 	}
 
@@ -472,8 +720,7 @@ void dma_debug_init(u32 num_entries)
 	}
 
 	if (dma_debug_fs_init() != 0) {
-		printk(KERN_ERR "DMA-API: error creating debugfs entries "
-				"- disabling\n");
+		pr_err("DMA-API: error creating debugfs entries - disabling\n");
 		global_disable = true;
 
 		return;
@@ -483,14 +730,15 @@ void dma_debug_init(u32 num_entries)
 		num_entries = req_entries;
 
 	if (prealloc_memory(num_entries) != 0) {
-		printk(KERN_ERR "DMA-API: debugging out of memory error "
-				"- disabled\n");
+		pr_err("DMA-API: debugging out of memory error - disabled\n");
 		global_disable = true;
 
 		return;
 	}
 
-	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
+	nr_total_entries = num_free_entries;
+
+	pr_info("DMA-API: debugging enabled by kernel config\n");
 }
 
 static __init int dma_debug_cmdline(char *str)
@@ -499,8 +747,7 @@ static __init int dma_debug_cmdline(char *str)
 		return -EINVAL;
 
 	if (strncmp(str, "off", 3) == 0) {
-		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
-				"command line\n");
+		pr_info("DMA-API: debugging disabled on kernel command line\n");
 		global_disable = true;
 	}
 
@@ -627,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
 			"[addr=%p] [size=%llu]\n", addr, size);
 }
 
-static void check_sync(struct device *dev, dma_addr_t addr,
-		       u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+		       struct dma_debug_entry *ref,
+		       bool to_cpu)
 {
-	struct dma_debug_entry ref = {
-		.dev            = dev,
-		.dev_addr       = addr,
-		.size           = size,
-		.direction      = direction,
-	};
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	bucket = get_hash_bucket(&ref, &flags);
+	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, &ref);
+	entry = hash_bucket_find(bucket, ref);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
 			   "to sync DMA memory it has not allocated "
 			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   (unsigned long long)addr, size);
+			   (unsigned long long)ref->dev_addr, ref->size);
 		goto out;
 	}
 
-	if ((offset + size) > entry->size) {
+	if (ref->size > entry->size) {
 		err_printk(dev, entry, "DMA-API: device driver syncs"
 			   " DMA memory outside allocated range "
 			   "[device address=0x%016llx] "
-			   "[allocation size=%llu bytes] [sync offset=%llu] "
-			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
-			   offset, size);
+			   "[allocation size=%llu bytes] "
+			   "[sync offset+size=%llu]\n",
+			   entry->dev_addr, entry->size,
+			   ref->size);
 	}
 
-	if (direction != entry->direction) {
+	if (ref->direction != entry->direction) {
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "DMA memory with different direction "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 	}
 
 	if (entry->direction == DMA_BIDIRECTIONAL)
 		goto out;
 
 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-		      !(direction == DMA_TO_DEVICE))
+		      !(ref->direction == DMA_TO_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "device read-only DMA memory for cpu "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-		       !(direction == DMA_FROM_DEVICE))
+		       !(ref->direction == DMA_FROM_DEVICE))
 		err_printk(dev, entry, "DMA-API: device driver syncs "
 			   "device write-only DMA memory to device "
 			   "[device address=0x%016llx] [size=%llu bytes] "
 			   "[mapped with %s] [synced with %s]\n",
-			   (unsigned long long)addr, entry->size,
+			   (unsigned long long)ref->dev_addr, entry->size,
 			   dir2name[entry->direction],
-			   dir2name[direction]);
+			   dir2name[ref->direction]);
 
 out:
 	put_hash_bucket(bucket, &flags);
@@ -774,15 +1017,15 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		entry->type           = dma_debug_sg;
 		entry->dev            = dev;
 		entry->paddr          = sg_phys(s);
-		entry->size           = s->length;
-		entry->dev_addr       = s->dma_address;
+		entry->size           = sg_dma_len(s);
+		entry->dev_addr       = sg_dma_address(s);
 		entry->direction      = direction;
 		entry->sg_call_ents   = nents;
 		entry->sg_mapped_ents = mapped_ents;
 
 		if (!PageHighMem(sg_page(s))) {
 			check_for_stack(dev, sg_virt(s));
-			check_for_illegal_area(dev, sg_virt(s), s->length);
+			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
 		}
 
 		add_dma_entry(entry);
@@ -790,13 +1033,30 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(debug_dma_map_sg);
 
+static int get_nr_mapped_entries(struct device *dev,
+				 struct dma_debug_entry *ref)
+{
+	struct dma_debug_entry *entry;
+	struct hash_bucket *bucket;
+	unsigned long flags;
+	int mapped_ents;
+
+	bucket      = get_hash_bucket(ref, &flags);
+	entry       = hash_bucket_find(bucket, ref);
+	mapped_ents = 0;
+
+	if (entry)
+		mapped_ents = entry->sg_mapped_ents;
+	put_hash_bucket(bucket, &flags);
+
+	return mapped_ents;
+}
+
 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			int nelems, int dir)
 {
-	struct dma_debug_entry *entry;
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
-	unsigned long flags;
 
 	if (unlikely(global_disable))
 		return;
@@ -807,24 +1067,17 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			.type           = dma_debug_sg,
 			.dev            = dev,
 			.paddr          = sg_phys(s),
-			.dev_addr       = s->dma_address,
-			.size           = s->length,
+			.dev_addr       = sg_dma_address(s),
+			.size           = sg_dma_len(s),
 			.direction      = dir,
-			.sg_call_ents   = 0,
+			.sg_call_ents   = nelems,
 		};
 
 		if (mapped_ents && i >= mapped_ents)
 			break;
 
-		if (mapped_ents == 0) {
-			struct hash_bucket *bucket;
-			ref.sg_call_ents = nelems;
-			bucket = get_hash_bucket(&ref, &flags);
-			entry = hash_bucket_find(bucket, &ref);
-			if (entry)
-				mapped_ents = entry->sg_mapped_ents;
-			put_hash_bucket(bucket, &flags);
-		}
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
@@ -879,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
+	struct dma_debug_entry ref;
+
 	if (unlikely(global_disable))
 		return;
 
-	check_sync(dev, dma_handle, size, 0, direction, true);
+	ref.type         = dma_debug_single;
+	ref.dev          = dev;
+	ref.dev_addr     = dma_handle;
+	ref.size         = size;
+	ref.direction    = direction;
+	ref.sg_call_ents = 0;
+
+	check_sync(dev, &ref, true);
 }
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
 
@@ -890,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
 				      dma_addr_t dma_handle, size_t size,
891 | int direction) | 1153 | int direction) |
892 | { | 1154 | { |
1155 | struct dma_debug_entry ref; | ||
1156 | |||
893 | if (unlikely(global_disable)) | 1157 | if (unlikely(global_disable)) |
894 | return; | 1158 | return; |
895 | 1159 | ||
896 | check_sync(dev, dma_handle, size, 0, direction, false); | 1160 | ref.type = dma_debug_single; |
1161 | ref.dev = dev; | ||
1162 | ref.dev_addr = dma_handle; | ||
1163 | ref.size = size; | ||
1164 | ref.direction = direction; | ||
1165 | ref.sg_call_ents = 0; | ||
1166 | |||
1167 | check_sync(dev, &ref, false); | ||
897 | } | 1168 | } |
898 | EXPORT_SYMBOL(debug_dma_sync_single_for_device); | 1169 | EXPORT_SYMBOL(debug_dma_sync_single_for_device); |
899 | 1170 | ||
@@ -902,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev, | |||
902 | unsigned long offset, size_t size, | 1173 | unsigned long offset, size_t size, |
903 | int direction) | 1174 | int direction) |
904 | { | 1175 | { |
1176 | struct dma_debug_entry ref; | ||
1177 | |||
905 | if (unlikely(global_disable)) | 1178 | if (unlikely(global_disable)) |
906 | return; | 1179 | return; |
907 | 1180 | ||
908 | check_sync(dev, dma_handle, size, offset, direction, true); | 1181 | ref.type = dma_debug_single; |
1182 | ref.dev = dev; | ||
1183 | ref.dev_addr = dma_handle; | ||
1184 | ref.size = offset + size; | ||
1185 | ref.direction = direction; | ||
1186 | ref.sg_call_ents = 0; | ||
1187 | |||
1188 | check_sync(dev, &ref, true); | ||
909 | } | 1189 | } |
910 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); | 1190 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); |
911 | 1191 | ||
@@ -914,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev, | |||
914 | unsigned long offset, | 1194 | unsigned long offset, |
915 | size_t size, int direction) | 1195 | size_t size, int direction) |
916 | { | 1196 | { |
1197 | struct dma_debug_entry ref; | ||
1198 | |||
917 | if (unlikely(global_disable)) | 1199 | if (unlikely(global_disable)) |
918 | return; | 1200 | return; |
919 | 1201 | ||
920 | check_sync(dev, dma_handle, size, offset, direction, false); | 1202 | ref.type = dma_debug_single; |
1203 | ref.dev = dev; | ||
1204 | ref.dev_addr = dma_handle; | ||
1205 | ref.size = offset + size; | ||
1206 | ref.direction = direction; | ||
1207 | ref.sg_call_ents = 0; | ||
1208 | |||
1209 | check_sync(dev, &ref, false); | ||
921 | } | 1210 | } |
922 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); | 1211 | EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); |
923 | 1212 | ||
@@ -925,14 +1214,30 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
925 | int nelems, int direction) | 1214 | int nelems, int direction) |
926 | { | 1215 | { |
927 | struct scatterlist *s; | 1216 | struct scatterlist *s; |
928 | int i; | 1217 | int mapped_ents = 0, i; |
929 | 1218 | ||
930 | if (unlikely(global_disable)) | 1219 | if (unlikely(global_disable)) |
931 | return; | 1220 | return; |
932 | 1221 | ||
933 | for_each_sg(sg, s, nelems, i) { | 1222 | for_each_sg(sg, s, nelems, i) { |
934 | check_sync(dev, s->dma_address, s->dma_length, 0, | 1223 | |
935 | direction, true); | 1224 | struct dma_debug_entry ref = { |
1225 | .type = dma_debug_sg, | ||
1226 | .dev = dev, | ||
1227 | .paddr = sg_phys(s), | ||
1228 | .dev_addr = sg_dma_address(s), | ||
1229 | .size = sg_dma_len(s), | ||
1230 | .direction = direction, | ||
1231 | .sg_call_ents = nelems, | ||
1232 | }; | ||
1233 | |||
1234 | if (!i) | ||
1235 | mapped_ents = get_nr_mapped_entries(dev, &ref); | ||
1236 | |||
1237 | if (i >= mapped_ents) | ||
1238 | break; | ||
1239 | |||
1240 | check_sync(dev, &ref, true); | ||
936 | } | 1241 | } |
937 | } | 1242 | } |
938 | EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); | 1243 | EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); |
@@ -941,15 +1246,48 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
941 | int nelems, int direction) | 1246 | int nelems, int direction) |
942 | { | 1247 | { |
943 | struct scatterlist *s; | 1248 | struct scatterlist *s; |
944 | int i; | 1249 | int mapped_ents = 0, i; |
945 | 1250 | ||
946 | if (unlikely(global_disable)) | 1251 | if (unlikely(global_disable)) |
947 | return; | 1252 | return; |
948 | 1253 | ||
949 | for_each_sg(sg, s, nelems, i) { | 1254 | for_each_sg(sg, s, nelems, i) { |
950 | check_sync(dev, s->dma_address, s->dma_length, 0, | 1255 | |
951 | direction, false); | 1256 | struct dma_debug_entry ref = { |
1257 | .type = dma_debug_sg, | ||
1258 | .dev = dev, | ||
1259 | .paddr = sg_phys(s), | ||
1260 | .dev_addr = sg_dma_address(s), | ||
1261 | .size = sg_dma_len(s), | ||
1262 | .direction = direction, | ||
1263 | .sg_call_ents = nelems, | ||
1264 | }; | ||
1265 | if (!i) | ||
1266 | mapped_ents = get_nr_mapped_entries(dev, &ref); | ||
1267 | |||
1268 | if (i >= mapped_ents) | ||
1269 | break; | ||
1270 | |||
1271 | check_sync(dev, &ref, false); | ||
952 | } | 1272 | } |
953 | } | 1273 | } |
954 | EXPORT_SYMBOL(debug_dma_sync_sg_for_device); | 1274 | EXPORT_SYMBOL(debug_dma_sync_sg_for_device); |
955 | 1275 | ||
1276 | static int __init dma_debug_driver_setup(char *str) | ||
1277 | { | ||
1278 | int i; | ||
1279 | |||
1280 | for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { | ||
1281 | current_driver_name[i] = *str; | ||
1282 | if (*str == 0) | ||
1283 | break; | ||
1284 | } | ||
1285 | |||
1286 | if (current_driver_name[0]) | ||
1287 | pr_info("DMA-API: enable driver filter for driver [%s]\n", | ||
1288 | current_driver_name); | ||
1289 | |||
1290 | |||
1291 | return 1; | ||
1292 | } | ||
1293 | __setup("dma_debug_driver=", dma_debug_driver_setup); | ||
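For reference, the new dma_debug_driver= boot parameter installs a per-driver filter by copying the name into current_driver_name, bounded to NAME_MAX_LEN - 1 bytes. A minimal userspace sketch of that parser; the NAME_MAX_LEN value and the driver name are assumptions for the demo:

#include <stdio.h>

#define NAME_MAX_LEN 64			/* assumed value for the demo */

static char current_driver_name[NAME_MAX_LEN];

static int dma_debug_driver_setup(const char *str)
{
	int i;

	/* Copy at most NAME_MAX_LEN - 1 bytes, stopping at the NUL;
	 * the final array slot stays 0, so the name is always terminated. */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		printf("DMA-API: enable driver filter for driver [%s]\n",
		       current_driver_name);

	return 1;
}

int main(void)
{
	/* Equivalent to booting with dma_debug_driver=e1000e
	 * (driver name chosen arbitrarily for the demo). */
	return !dma_debug_driver_setup("e1000e");
}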
diff --git a/lib/extable.c b/lib/extable.c index 179c08745595..4cac81ec225e 100644 --- a/lib/extable.c +++ b/lib/extable.c | |||
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start, | |||
39 | sort(start, finish - start, sizeof(struct exception_table_entry), | 39 | sort(start, finish - start, sizeof(struct exception_table_entry), |
40 | cmp_ex, NULL); | 40 | cmp_ex, NULL); |
41 | } | 41 | } |
42 | #endif | 42 | |
43 | #ifdef CONFIG_MODULES | ||
44 | /* | ||
45 | * If the exception table is sorted, any referring to the module init | ||
46 | * will be at the beginning or the end. | ||
47 | */ | ||
48 | void trim_init_extable(struct module *m) | ||
49 | { | ||
50 | /* trim the beginning */ | ||
51 | while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { | ||
52 | m->extable++; | ||
53 | m->num_exentries--; | ||
54 | } | ||
55 | /* trim the end */ | ||
56 | while (m->num_exentries && | ||
57 | within_module_init(m->extable[m->num_exentries-1].insn, m)) | ||
58 | m->num_exentries--; | ||
59 | } | ||
60 | #endif /* CONFIG_MODULES */ | ||
61 | #endif /* !ARCH_HAS_SORT_EXTABLE */ | ||
43 | 62 | ||
44 | #ifndef ARCH_HAS_SEARCH_EXTABLE | 63 | #ifndef ARCH_HAS_SEARCH_EXTABLE |
45 | /* | 64 | /* |
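trim_init_extable() depends on the table being sorted by instruction address: entries pointing into the discarded init section can only sit at the head or the tail, so two trim loops suffice. A small userspace model of that idea; the addresses and the init range are made up for the demo:

#include <stdio.h>

struct entry { unsigned long insn; };

/* Assumed init range for the demo: [1000, 2000). */
static int within_init(unsigned long insn)
{
	return insn >= 1000 && insn < 2000;
}

int main(void)
{
	struct entry table[] = { { 1100 }, { 1200 }, { 3000 }, { 4000 } };
	struct entry *extable = table;
	unsigned int num_exentries = 4;

	/* trim the beginning */
	while (num_exentries && within_init(extable[0].insn)) {
		extable++;
		num_exentries--;
	}
	/* trim the end */
	while (num_exentries &&
	       within_init(extable[num_exentries - 1].insn))
		num_exentries--;

	printf("%u entries left, first insn %lu\n",
	       num_exentries, num_exentries ? extable[0].insn : 0);
	return 0;		/* prints: 2 entries left, first insn 3000 */
}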
diff --git a/lib/gcd.c b/lib/gcd.c new file mode 100644 index 000000000000..f879033d9822 --- /dev/null +++ b/lib/gcd.c | |||
@@ -0,0 +1,18 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/gcd.h> | ||
3 | #include <linux/module.h> | ||
4 | |||
5 | /* Greatest common divisor */ | ||
6 | unsigned long gcd(unsigned long a, unsigned long b) | ||
7 | { | ||
8 | unsigned long r; | ||
9 | |||
10 | if (a < b) | ||
11 | swap(a, b); | ||
12 | while ((r = a % b) != 0) { | ||
13 | a = b; | ||
14 | b = r; | ||
15 | } | ||
16 | return b; | ||
17 | } | ||
18 | EXPORT_SYMBOL_GPL(gcd); | ||
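A quick userspace check of the Euclidean loop. Note that, as written, gcd() needs both arguments non-zero, since the smaller value ends up as the divisor in a % b:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	unsigned long r;

	if (a < b) {			/* open-coded swap() */
		unsigned long t = a;
		a = b;
		b = t;
	}
	while ((r = a % b) != 0) {
		a = b;
		b = r;
	}
	return b;
}

int main(void)
{
	printf("%lu\n", gcd(48, 18));	/* prints 6 */
	printf("%lu\n", gcd(17, 5));	/* coprime: prints 1 */
	return 0;
}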
diff --git a/lib/genalloc.c b/lib/genalloc.c index f6d276db2d58..eed2bdb865e7 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool) | |||
85 | int bit, end_bit; | 85 | int bit, end_bit; |
86 | 86 | ||
87 | 87 | ||
88 | write_lock(&pool->lock); | ||
89 | list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { | 88 | list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { |
90 | chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); | 89 | chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); |
91 | list_del(&chunk->next_chunk); | 90 | list_del(&chunk->next_chunk); |
diff --git a/lib/hexdump.c b/lib/hexdump.c index f07c0db81d26..39af2560f765 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c | |||
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, | |||
65 | 65 | ||
66 | for (j = 0; j < ngroups; j++) | 66 | for (j = 0; j < ngroups; j++) |
67 | lx += scnprintf(linebuf + lx, linebuflen - lx, | 67 | lx += scnprintf(linebuf + lx, linebuflen - lx, |
68 | "%16.16llx ", (unsigned long long)*(ptr8 + j)); | 68 | "%s%16.16llx", j ? " " : "", |
69 | (unsigned long long)*(ptr8 + j)); | ||
69 | ascii_column = 17 * ngroups + 2; | 70 | ascii_column = 17 * ngroups + 2; |
70 | break; | 71 | break; |
71 | } | 72 | } |
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, | |||
76 | 77 | ||
77 | for (j = 0; j < ngroups; j++) | 78 | for (j = 0; j < ngroups; j++) |
78 | lx += scnprintf(linebuf + lx, linebuflen - lx, | 79 | lx += scnprintf(linebuf + lx, linebuflen - lx, |
79 | "%8.8x ", *(ptr4 + j)); | 80 | "%s%8.8x", j ? " " : "", *(ptr4 + j)); |
80 | ascii_column = 9 * ngroups + 2; | 81 | ascii_column = 9 * ngroups + 2; |
81 | break; | 82 | break; |
82 | } | 83 | } |
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, | |||
87 | 88 | ||
88 | for (j = 0; j < ngroups; j++) | 89 | for (j = 0; j < ngroups; j++) |
89 | lx += scnprintf(linebuf + lx, linebuflen - lx, | 90 | lx += scnprintf(linebuf + lx, linebuflen - lx, |
90 | "%4.4x ", *(ptr2 + j)); | 91 | "%s%4.4x", j ? " " : "", *(ptr2 + j)); |
91 | ascii_column = 5 * ngroups + 2; | 92 | ascii_column = 5 * ngroups + 2; |
92 | break; | 93 | break; |
93 | } | 94 | } |
94 | 95 | ||
95 | default: | 96 | default: |
96 | for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; | 97 | for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { |
97 | j++) { | ||
98 | ch = ptr[j]; | 98 | ch = ptr[j]; |
99 | linebuf[lx++] = hex_asc_hi(ch); | 99 | linebuf[lx++] = hex_asc_hi(ch); |
100 | linebuf[lx++] = hex_asc_lo(ch); | 100 | linebuf[lx++] = hex_asc_lo(ch); |
101 | linebuf[lx++] = ' '; | 101 | linebuf[lx++] = ' '; |
102 | } | 102 | } |
103 | if (j) | ||
104 | lx--; | ||
105 | |||
103 | ascii_column = 3 * rowsize + 2; | 106 | ascii_column = 3 * rowsize + 2; |
104 | break; | 107 | break; |
105 | } | 108 | } |
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, | |||
108 | 111 | ||
109 | while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) | 112 | while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) |
110 | linebuf[lx++] = ' '; | 113 | linebuf[lx++] = ' '; |
111 | for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) | 114 | for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) |
112 | linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] | 115 | linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] |
113 | : '.'; | 116 | : '.'; |
114 | nil: | 117 | nil: |
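The recurring "%s%16.16llx", j ? " " : "" pattern replaces the old trailing-space format: every group except the first is prefixed with the separator, so the line never ends in a blank. The idiom in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int groups[] = { 0xdeadbeef, 0x12345678, 0x0badf00d };
	char line[64];
	int lx = 0;
	unsigned int j;

	for (j = 0; j < 3; j++)
		lx += snprintf(line + lx, sizeof(line) - lx,
			       "%s%8.8x", j ? " " : "", groups[j]);

	printf("[%s]\n", line);	/* [deadbeef 12345678 0badf00d] */
	return 0;
}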
diff --git a/lib/kobject.c b/lib/kobject.c index bacf6fe4f7a0..b512b746d2af 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name, | |||
793 | struct kobject *parent_kobj) | 793 | struct kobject *parent_kobj) |
794 | { | 794 | { |
795 | struct kset *kset; | 795 | struct kset *kset; |
796 | int retval; | ||
796 | 797 | ||
797 | kset = kzalloc(sizeof(*kset), GFP_KERNEL); | 798 | kset = kzalloc(sizeof(*kset), GFP_KERNEL); |
798 | if (!kset) | 799 | if (!kset) |
799 | return NULL; | 800 | return NULL; |
800 | kobject_set_name(&kset->kobj, name); | 801 | retval = kobject_set_name(&kset->kobj, name); |
802 | if (retval) { | ||
803 | kfree(kset); | ||
804 | return NULL; | ||
805 | } | ||
801 | kset->uevent_ops = uevent_ops; | 806 | kset->uevent_ops = uevent_ops; |
802 | kset->kobj.parent = parent_kobj; | 807 | kset->kobj.parent = parent_kobj; |
803 | 808 | ||
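The kobject fix is the standard constructor-unwind pattern: when a setup step after the allocation fails, free the allocation before returning NULL. A generic userspace sketch of that shape; every name below is a stand-in, not kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct kset_model { char *name; };

static int set_name(struct kset_model *k, const char *name)
{
	k->name = strdup(name);		/* stand-in for kobject_set_name() */
	return k->name ? 0 : -ENOMEM;
}

static struct kset_model *kset_create(const char *name)
{
	struct kset_model *kset = calloc(1, sizeof(*kset));
	int retval;

	if (!kset)
		return NULL;
	retval = set_name(kset, name);
	if (retval) {			/* the fix: don't leak kset */
		free(kset);
		return NULL;
	}
	return kset;
}

int main(void)
{
	struct kset_model *k = kset_create("devices");

	printf("%s\n", k ? k->name : "(creation failed)");
	return 0;
}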
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 4bb42a0344ec..23abbd93cae1 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
351 | } | 351 | } |
352 | EXPORT_SYMBOL(radix_tree_insert); | 352 | EXPORT_SYMBOL(radix_tree_insert); |
353 | 353 | ||
354 | /** | 354 | /* |
355 | * radix_tree_lookup_slot - lookup a slot in a radix tree | 355 | * is_slot == 1 : search for the slot. |
356 | * @root: radix tree root | 356 | * is_slot == 0 : search for the node. |
357 | * @index: index key | ||
358 | * | ||
359 | * Returns: the slot corresponding to the position @index in the | ||
360 | * radix tree @root. This is useful for update-if-exists operations. | ||
361 | * | ||
362 | * This function can be called under rcu_read_lock iff the slot is not | ||
363 | * modified by radix_tree_replace_slot, otherwise it must be called | ||
364 | * exclusive from other writers. Any dereference of the slot must be done | ||
365 | * using radix_tree_deref_slot. | ||
366 | */ | 357 | */ |
367 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | 358 | static void *radix_tree_lookup_element(struct radix_tree_root *root, |
359 | unsigned long index, int is_slot) | ||
368 | { | 360 | { |
369 | unsigned int height, shift; | 361 | unsigned int height, shift; |
370 | struct radix_tree_node *node, **slot; | 362 | struct radix_tree_node *node, **slot; |
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | |||
376 | if (!radix_tree_is_indirect_ptr(node)) { | 368 | if (!radix_tree_is_indirect_ptr(node)) { |
377 | if (index > 0) | 369 | if (index > 0) |
378 | return NULL; | 370 | return NULL; |
379 | return (void **)&root->rnode; | 371 | return is_slot ? (void *)&root->rnode : node; |
380 | } | 372 | } |
381 | node = radix_tree_indirect_to_ptr(node); | 373 | node = radix_tree_indirect_to_ptr(node); |
382 | 374 | ||
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | |||
397 | height--; | 389 | height--; |
398 | } while (height > 0); | 390 | } while (height > 0); |
399 | 391 | ||
400 | return (void **)slot; | 392 | return is_slot ? (void *)slot : node; |
393 | } | ||
394 | |||
395 | /** | ||
396 | * radix_tree_lookup_slot - lookup a slot in a radix tree | ||
397 | * @root: radix tree root | ||
398 | * @index: index key | ||
399 | * | ||
400 | * Returns: the slot corresponding to the position @index in the | ||
401 | * radix tree @root. This is useful for update-if-exists operations. | ||
402 | * | ||
403 | * This function can be called under rcu_read_lock iff the slot is not | ||
404 | * modified by radix_tree_replace_slot, otherwise it must be called | ||
405 | * exclusive from other writers. Any dereference of the slot must be done | ||
406 | * using radix_tree_deref_slot. | ||
407 | */ | ||
408 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | ||
409 | { | ||
410 | return (void **)radix_tree_lookup_element(root, index, 1); | ||
401 | } | 411 | } |
402 | EXPORT_SYMBOL(radix_tree_lookup_slot); | 412 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
403 | 413 | ||
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); | |||
415 | */ | 425 | */ |
416 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) | 426 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) |
417 | { | 427 | { |
418 | unsigned int height, shift; | 428 | return radix_tree_lookup_element(root, index, 0); |
419 | struct radix_tree_node *node, **slot; | ||
420 | |||
421 | node = rcu_dereference(root->rnode); | ||
422 | if (node == NULL) | ||
423 | return NULL; | ||
424 | |||
425 | if (!radix_tree_is_indirect_ptr(node)) { | ||
426 | if (index > 0) | ||
427 | return NULL; | ||
428 | return node; | ||
429 | } | ||
430 | node = radix_tree_indirect_to_ptr(node); | ||
431 | |||
432 | height = node->height; | ||
433 | if (index > radix_tree_maxindex(height)) | ||
434 | return NULL; | ||
435 | |||
436 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
437 | |||
438 | do { | ||
439 | slot = (struct radix_tree_node **) | ||
440 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | ||
441 | node = rcu_dereference(*slot); | ||
442 | if (node == NULL) | ||
443 | return NULL; | ||
444 | |||
445 | shift -= RADIX_TREE_MAP_SHIFT; | ||
446 | height--; | ||
447 | } while (height > 0); | ||
448 | |||
449 | return node; | ||
450 | } | 429 | } |
451 | EXPORT_SYMBOL(radix_tree_lookup); | 430 | EXPORT_SYMBOL(radix_tree_lookup); |
452 | 431 | ||
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, | |||
666 | } | 645 | } |
667 | EXPORT_SYMBOL(radix_tree_next_hole); | 646 | EXPORT_SYMBOL(radix_tree_next_hole); |
668 | 647 | ||
648 | /** | ||
649 | * radix_tree_prev_hole - find the prev hole (not-present entry) | ||
650 | * @root: tree root | ||
651 | * @index: index key | ||
652 | * @max_scan: maximum range to search | ||
653 | * | ||
654 | * Search backwards in the range [max(index-max_scan+1, 0), index] | ||
655 | * for the first hole. | ||
656 | * | ||
657 | * Returns: the index of the hole if found, otherwise returns an index | ||
658 | * outside of the set specified (in which case 'index - return >= max_scan' | ||
659 | * will be true). In rare cases of wrap-around, LONG_MAX will be returned. | ||
660 | * | ||
661 | * radix_tree_prev_hole may be called under rcu_read_lock. However, like | ||
662 | * radix_tree_gang_lookup, this will not atomically search a snapshot of | ||
663 | * the tree at a single point in time. For example, if a hole is created | ||
664 | * at index 10, then subsequently a hole is created at index 5, | ||
665 | * radix_tree_prev_hole covering both indexes may return 5 if called under | ||
666 | * rcu_read_lock. | ||
667 | */ | ||
668 | unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | ||
669 | unsigned long index, unsigned long max_scan) | ||
670 | { | ||
671 | unsigned long i; | ||
672 | |||
673 | for (i = 0; i < max_scan; i++) { | ||
674 | if (!radix_tree_lookup(root, index)) | ||
675 | break; | ||
676 | index--; | ||
677 | if (index == LONG_MAX) | ||
678 | break; | ||
679 | } | ||
680 | |||
681 | return index; | ||
682 | } | ||
683 | EXPORT_SYMBOL(radix_tree_prev_hole); | ||
684 | |||
669 | static unsigned int | 685 | static unsigned int |
670 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | 686 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, |
671 | unsigned int max_items, unsigned long *next_index) | 687 | unsigned int max_items, unsigned long *next_index) |
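A userspace model of the radix_tree_prev_hole() scan: step backwards from index until a lookup misses or max_scan indices have been tried. The tree is modelled as a flat presence array, and the wrap guard here uses ULONG_MAX for a plain unsigned long:

#include <limits.h>
#include <stdio.h>

#define SLOTS 16
static int present[SLOTS] = { [3] = 1, [4] = 1, [5] = 1 };

static unsigned long prev_hole(unsigned long index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		if (index >= SLOTS || !present[index])
			break;			/* found a hole */
		index--;
		if (index == ULONG_MAX)
			break;			/* wrapped below zero */
	}
	return index;
}

int main(void)
{
	/* Slots 3..5 are occupied; scanning back from 5 stops at 2. */
	printf("%lu\n", prev_hole(5, 8));	/* prints 2 */
	return 0;
}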
diff --git a/lib/rational.c b/lib/rational.c new file mode 100644 index 000000000000..b3c099b5478e --- /dev/null +++ b/lib/rational.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * rational fractions | ||
3 | * | ||
4 | * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> | ||
5 | * | ||
6 | * helper functions for dealing with rational numbers | ||
7 | */ | ||
8 | |||
9 | #include <linux/rational.h> | ||
10 | |||
11 | /* | ||
12 | * Calculate the best rational approximation for a given fraction, | ||
13 | * taking into account restricted register sizes, e.g. to find | ||
14 | * appropriate values for a PLL with a 5 bit denominator and an | ||
15 | * 8 bit numerator register field, trying to set up a | ||
16 | * frequency ratio of 3.1415, one would say: | ||
17 | * | ||
18 | * rational_best_approximation(31415, 10000, | ||
19 | * (1 << 8) - 1, (1 << 5) - 1, &n, &d); | ||
20 | * | ||
21 | * You may look at given_numerator as a fixed-point number, | ||
22 | * with the fractional part size described by given_denominator. | ||
23 | * | ||
24 | * For theoretical background, see: | ||
25 | * http://en.wikipedia.org/wiki/Continued_fraction | ||
26 | */ | ||
27 | |||
28 | void rational_best_approximation( | ||
29 | unsigned long given_numerator, unsigned long given_denominator, | ||
30 | unsigned long max_numerator, unsigned long max_denominator, | ||
31 | unsigned long *best_numerator, unsigned long *best_denominator) | ||
32 | { | ||
33 | unsigned long n, d, n0, d0, n1, d1; | ||
34 | n = given_numerator; | ||
35 | d = given_denominator; | ||
36 | n0 = d1 = 0; | ||
37 | n1 = d0 = 1; | ||
38 | for (;;) { | ||
39 | unsigned long t, a; | ||
40 | if ((n1 > max_numerator) || (d1 > max_denominator)) { | ||
41 | n1 = n0; | ||
42 | d1 = d0; | ||
43 | break; | ||
44 | } | ||
45 | if (d == 0) | ||
46 | break; | ||
47 | t = d; | ||
48 | a = n / d; | ||
49 | d = n % d; | ||
50 | n = t; | ||
51 | t = n0 + a * n1; | ||
52 | n0 = n1; | ||
53 | n1 = t; | ||
54 | t = d0 + a * d1; | ||
55 | d0 = d1; | ||
56 | d1 = t; | ||
57 | } | ||
58 | *best_numerator = n1; | ||
59 | *best_denominator = d1; | ||
60 | } | ||
61 | |||
62 | EXPORT_SYMBOL(rational_best_approximation); | ||
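Tracing the loop with the example from the comment block: the convergents of 31415/10000 are 3/1, 22/7, 311/99, ...; with the 8 bit numerator and 5 bit denominator limits, 311/99 overflows both fields, so the loop steps back to 22/7. A userspace copy to check that:

#include <stdio.h>

static void best_approx(unsigned long n, unsigned long d,
			unsigned long max_n, unsigned long max_d,
			unsigned long *bn, unsigned long *bd)
{
	unsigned long n0 = 0, n1 = 1, d0 = 1, d1 = 0;

	for (;;) {
		unsigned long t, a;

		if (n1 > max_n || d1 > max_d) {
			n1 = n0;		/* step back one convergent */
			d1 = d0;
			break;
		}
		if (d == 0)
			break;
		t = d;
		a = n / d;			/* next continued-fraction term */
		d = n % d;
		n = t;
		t = n0 + a * n1;
		n0 = n1;
		n1 = t;
		t = d0 + a * d1;
		d0 = d1;
		d1 = t;
	}
	*bn = n1;
	*bd = d1;
}

int main(void)
{
	unsigned long n, d;

	best_approx(31415, 10000, (1 << 8) - 1, (1 << 5) - 1, &n, &d);
	printf("%lu/%lu\n", n, d);		/* prints 22/7 */
	return 0;
}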
diff --git a/lib/rbtree.c b/lib/rbtree.c index f653659e0bc1..e2aa3be29858 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root) | |||
231 | node = node->rb_right; | 231 | node = node->rb_right; |
232 | while ((left = node->rb_left) != NULL) | 232 | while ((left = node->rb_left) != NULL) |
233 | node = left; | 233 | node = left; |
234 | |||
235 | if (rb_parent(old)) { | ||
236 | if (rb_parent(old)->rb_left == old) | ||
237 | rb_parent(old)->rb_left = node; | ||
238 | else | ||
239 | rb_parent(old)->rb_right = node; | ||
240 | } else | ||
241 | root->rb_node = node; | ||
242 | |||
234 | child = node->rb_right; | 243 | child = node->rb_right; |
235 | parent = rb_parent(node); | 244 | parent = rb_parent(node); |
236 | color = rb_color(node); | 245 | color = rb_color(node); |
237 | 246 | ||
238 | if (child) | ||
239 | rb_set_parent(child, parent); | ||
240 | if (parent == old) { | 247 | if (parent == old) { |
241 | parent->rb_right = child; | ||
242 | parent = node; | 248 | parent = node; |
243 | } else | 249 | } else { |
250 | if (child) | ||
251 | rb_set_parent(child, parent); | ||
244 | parent->rb_left = child; | 252 | parent->rb_left = child; |
245 | 253 | ||
254 | node->rb_right = old->rb_right; | ||
255 | rb_set_parent(old->rb_right, node); | ||
256 | } | ||
257 | |||
246 | node->rb_parent_color = old->rb_parent_color; | 258 | node->rb_parent_color = old->rb_parent_color; |
247 | node->rb_right = old->rb_right; | ||
248 | node->rb_left = old->rb_left; | 259 | node->rb_left = old->rb_left; |
249 | |||
250 | if (rb_parent(old)) | ||
251 | { | ||
252 | if (rb_parent(old)->rb_left == old) | ||
253 | rb_parent(old)->rb_left = node; | ||
254 | else | ||
255 | rb_parent(old)->rb_right = node; | ||
256 | } else | ||
257 | root->rb_node = node; | ||
258 | |||
259 | rb_set_parent(old->rb_left, node); | 260 | rb_set_parent(old->rb_left, node); |
260 | if (old->rb_right) | 261 | |
261 | rb_set_parent(old->rb_right, node); | ||
262 | goto color; | 262 | goto color; |
263 | } | 263 | } |
264 | 264 | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index b7b449dafbe5..a295e404e908 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -347,9 +347,12 @@ bool sg_miter_next(struct sg_mapping_iter *miter) | |||
347 | sg_miter_stop(miter); | 347 | sg_miter_stop(miter); |
348 | 348 | ||
349 | /* get to the next sg if necessary. __offset is adjusted by stop */ | 349 | /* get to the next sg if necessary. __offset is adjusted by stop */ |
350 | if (miter->__offset == miter->__sg->length && --miter->__nents) { | 350 | while (miter->__offset == miter->__sg->length) { |
351 | miter->__sg = sg_next(miter->__sg); | 351 | if (--miter->__nents) { |
352 | miter->__offset = 0; | 352 | miter->__sg = sg_next(miter->__sg); |
353 | miter->__offset = 0; | ||
354 | } else | ||
355 | return false; | ||
353 | } | 356 | } |
354 | 357 | ||
355 | /* map the next page */ | 358 | /* map the next page */ |
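Turning the single test in sg_miter_next() into a loop means every fully consumed element, including zero-length ones, is skipped before the next page is mapped; the old if could only step past one. A generic model of why the loop matters; the element lengths are made up for the demo:

#include <stdbool.h>
#include <stdio.h>

struct elem { unsigned int length; };

static struct elem list[] = { {4}, {0}, {0}, {8} };	/* zero-length holes */
static unsigned int idx, offset, nents = 4;

static bool miter_next(void)
{
	/* Loop, not a single test: skip every consumed element. */
	while (offset == list[idx].length) {
		if (idx + 1 < nents) {	/* --__nents in the kernel version */
			idx++;
			offset = 0;
		} else {
			return false;
		}
	}
	offset = list[idx].length;	/* consume the whole element */
	return true;
}

int main(void)
{
	/* Prints elements 0 and 3; the zero-length ones are skipped. */
	while (miter_next())
		printf("element %u, length %u\n", idx, list[idx].length);
	return 0;
}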
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 2b0b5a7d2ced..bffe6d7ef9d9 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -60,8 +60,8 @@ enum dma_sync_target { | |||
60 | int swiotlb_force; | 60 | int swiotlb_force; |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * Used to do a quick range check in swiotlb_unmap_single and | 63 | * Used to do a quick range check in unmap_single and |
64 | * swiotlb_sync_single_*, to see if the memory was in fact allocated by this | 64 | * sync_single_*, to see if the memory was in fact allocated by this |
65 | * API. | 65 | * API. |
66 | */ | 66 | */ |
67 | static char *io_tlb_start, *io_tlb_end; | 67 | static char *io_tlb_start, *io_tlb_end; |
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | |||
129 | return paddr; | 129 | return paddr; |
130 | } | 130 | } |
131 | 131 | ||
132 | phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr) | 132 | phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) |
133 | { | 133 | { |
134 | return baddr; | 134 | return baddr; |
135 | } | 135 | } |
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | |||
140 | return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); | 140 | return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void *swiotlb_bus_to_virt(dma_addr_t address) | 143 | void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address) |
144 | { | 144 | { |
145 | return phys_to_virt(swiotlb_bus_to_phys(address)); | 145 | return phys_to_virt(swiotlb_bus_to_phys(hwdev, address)); |
146 | } | ||
147 | |||
148 | int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev, | ||
149 | dma_addr_t addr, size_t size) | ||
150 | { | ||
151 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | ||
146 | } | 152 | } |
147 | 153 | ||
148 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) | 154 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) |
@@ -309,10 +315,10 @@ cleanup1: | |||
309 | return -ENOMEM; | 315 | return -ENOMEM; |
310 | } | 316 | } |
311 | 317 | ||
312 | static int | 318 | static inline int |
313 | address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) | 319 | address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) |
314 | { | 320 | { |
315 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | 321 | return swiotlb_arch_address_needs_mapping(hwdev, addr, size); |
316 | } | 322 | } |
317 | 323 | ||
318 | static inline int range_needs_mapping(phys_addr_t paddr, size_t size) | 324 | static inline int range_needs_mapping(phys_addr_t paddr, size_t size) |
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | |||
341 | unsigned long flags; | 347 | unsigned long flags; |
342 | 348 | ||
343 | while (size) { | 349 | while (size) { |
344 | sz = min(PAGE_SIZE - offset, size); | 350 | sz = min_t(size_t, PAGE_SIZE - offset, size); |
345 | 351 | ||
346 | local_irq_save(flags); | 352 | local_irq_save(flags); |
347 | buffer = kmap_atomic(pfn_to_page(pfn), | 353 | buffer = kmap_atomic(pfn_to_page(pfn), |
@@ -476,7 +482,7 @@ found: | |||
476 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | 482 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. |
477 | */ | 483 | */ |
478 | static void | 484 | static void |
479 | unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | 485 | do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) |
480 | { | 486 | { |
481 | unsigned long flags; | 487 | unsigned long flags; |
482 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 488 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
560 | size)) { | 566 | size)) { |
561 | /* | 567 | /* |
562 | * The allocated memory isn't reachable by the device. | 568 | * The allocated memory isn't reachable by the device. |
563 | * Fall back on swiotlb_map_single(). | ||
564 | */ | 569 | */ |
565 | free_pages((unsigned long) ret, order); | 570 | free_pages((unsigned long) ret, order); |
566 | ret = NULL; | 571 | ret = NULL; |
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
568 | if (!ret) { | 573 | if (!ret) { |
569 | /* | 574 | /* |
570 | * We are either out of memory or the device can't DMA | 575 | * We are either out of memory or the device can't DMA |
571 | * to GFP_DMA memory; fall back on | 576 | * to GFP_DMA memory; fall back on map_single(), which |
572 | * swiotlb_map_single(), which will grab memory from | 577 | * will grab memory from the lowest available address range. |
573 | * the lowest available address range. | ||
574 | */ | 578 | */ |
575 | ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); | 579 | ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); |
576 | if (!ret) | 580 | if (!ret) |
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
587 | (unsigned long long)dev_addr); | 591 | (unsigned long long)dev_addr); |
588 | 592 | ||
589 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 593 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
590 | unmap_single(hwdev, ret, size, DMA_TO_DEVICE); | 594 | do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); |
591 | return NULL; | 595 | return NULL; |
592 | } | 596 | } |
593 | *dma_handle = dev_addr; | 597 | *dma_handle = dev_addr; |
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
604 | free_pages((unsigned long) vaddr, get_order(size)); | 608 | free_pages((unsigned long) vaddr, get_order(size)); |
605 | else | 609 | else |
606 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 610 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
607 | unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); | 611 | do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); |
608 | } | 612 | } |
609 | EXPORT_SYMBOL(swiotlb_free_coherent); | 613 | EXPORT_SYMBOL(swiotlb_free_coherent); |
610 | 614 | ||
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | |||
634 | * physical address to use is returned. | 638 | * physical address to use is returned. |
635 | * | 639 | * |
636 | * Once the device is given the dma address, the device owns this memory until | 640 | * Once the device is given the dma address, the device owns this memory until |
637 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. | 641 | * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed. |
638 | */ | 642 | */ |
639 | dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | 643 | dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
640 | unsigned long offset, size_t size, | 644 | unsigned long offset, size_t size, |
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | |||
642 | struct dma_attrs *attrs) | 646 | struct dma_attrs *attrs) |
643 | { | 647 | { |
644 | phys_addr_t phys = page_to_phys(page) + offset; | 648 | phys_addr_t phys = page_to_phys(page) + offset; |
645 | void *ptr = page_address(page) + offset; | ||
646 | dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); | 649 | dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); |
647 | void *map; | 650 | void *map; |
648 | 651 | ||
649 | BUG_ON(dir == DMA_NONE); | 652 | BUG_ON(dir == DMA_NONE); |
650 | /* | 653 | /* |
651 | * If the pointer passed in happens to be in the device's DMA window, | 654 | * If the address happens to be in the device's DMA window, |
652 | * we can safely return the device addr and not worry about bounce | 655 | * we can safely return the device addr and not worry about bounce |
653 | * buffering it. | 656 | * buffering it. |
654 | */ | 657 | */ |
655 | if (!address_needs_mapping(dev, dev_addr, size) && | 658 | if (!address_needs_mapping(dev, dev_addr, size) && |
656 | !range_needs_mapping(virt_to_phys(ptr), size)) | 659 | !range_needs_mapping(phys, size)) |
657 | return dev_addr; | 660 | return dev_addr; |
658 | 661 | ||
659 | /* | 662 | /* |
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); | |||
679 | 682 | ||
680 | /* | 683 | /* |
681 | * Unmap a single streaming mode DMA translation. The dma_addr and size must | 684 | * Unmap a single streaming mode DMA translation. The dma_addr and size must |
682 | * match what was provided for in a previous swiotlb_map_single call. All | 685 | * match what was provided for in a previous swiotlb_map_page call. All |
683 | * other usages are undefined. | 686 | * other usages are undefined. |
684 | * | 687 | * |
685 | * After this call, reads by the cpu to the buffer are guaranteed to see | 688 | * After this call, reads by the cpu to the buffer are guaranteed to see |
686 | * whatever the device wrote there. | 689 | * whatever the device wrote there. |
687 | */ | 690 | */ |
691 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, | ||
692 | size_t size, int dir) | ||
693 | { | ||
694 | char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); | ||
695 | |||
696 | BUG_ON(dir == DMA_NONE); | ||
697 | |||
698 | if (is_swiotlb_buffer(dma_addr)) { | ||
699 | do_unmap_single(hwdev, dma_addr, size, dir); | ||
700 | return; | ||
701 | } | ||
702 | |||
703 | if (dir != DMA_FROM_DEVICE) | ||
704 | return; | ||
705 | |||
706 | dma_mark_clean(dma_addr, size); | ||
707 | } | ||
708 | |||
688 | void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, | 709 | void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
689 | size_t size, enum dma_data_direction dir, | 710 | size_t size, enum dma_data_direction dir, |
690 | struct dma_attrs *attrs) | 711 | struct dma_attrs *attrs) |
691 | { | 712 | { |
692 | char *dma_addr = swiotlb_bus_to_virt(dev_addr); | 713 | unmap_single(hwdev, dev_addr, size, dir); |
693 | |||
694 | BUG_ON(dir == DMA_NONE); | ||
695 | if (is_swiotlb_buffer(dma_addr)) | ||
696 | unmap_single(hwdev, dma_addr, size, dir); | ||
697 | else if (dir == DMA_FROM_DEVICE) | ||
698 | dma_mark_clean(dma_addr, size); | ||
699 | } | 714 | } |
700 | EXPORT_SYMBOL_GPL(swiotlb_unmap_page); | 715 | EXPORT_SYMBOL_GPL(swiotlb_unmap_page); |
701 | 716 | ||
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); | |||
703 | * Make physical memory consistent for a single streaming mode DMA translation | 718 | * Make physical memory consistent for a single streaming mode DMA translation |
704 | * after a transfer. | 719 | * after a transfer. |
705 | * | 720 | * |
706 | * If you perform a swiotlb_map_single() but wish to interrogate the buffer | 721 | * If you perform a swiotlb_map_page() but wish to interrogate the buffer |
707 | * using the cpu, yet do not wish to teardown the dma mapping, you must | 722 | * using the cpu, yet do not wish to teardown the dma mapping, you must |
708 | * call this function before doing so. At the next point you give the dma | 723 | * call this function before doing so. At the next point you give the dma |
709 | * address back to the card, you must first perform a | 724 | * address back to the card, you must first perform a |
@@ -713,13 +728,19 @@ static void | |||
713 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | 728 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, |
714 | size_t size, int dir, int target) | 729 | size_t size, int dir, int target) |
715 | { | 730 | { |
716 | char *dma_addr = swiotlb_bus_to_virt(dev_addr); | 731 | char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); |
717 | 732 | ||
718 | BUG_ON(dir == DMA_NONE); | 733 | BUG_ON(dir == DMA_NONE); |
719 | if (is_swiotlb_buffer(dma_addr)) | 734 | |
735 | if (is_swiotlb_buffer(dma_addr)) { | ||
720 | sync_single(hwdev, dma_addr, size, dir, target); | 736 | sync_single(hwdev, dma_addr, size, dir, target); |
721 | else if (dir == DMA_FROM_DEVICE) | 737 | return; |
722 | dma_mark_clean(dma_addr, size); | 738 | } |
739 | |||
740 | if (dir != DMA_FROM_DEVICE) | ||
741 | return; | ||
742 | |||
743 | dma_mark_clean(dma_addr, size); | ||
723 | } | 744 | } |
724 | 745 | ||
725 | void | 746 | void |
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, | |||
746 | unsigned long offset, size_t size, | 767 | unsigned long offset, size_t size, |
747 | int dir, int target) | 768 | int dir, int target) |
748 | { | 769 | { |
749 | char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset; | 770 | swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target); |
750 | |||
751 | BUG_ON(dir == DMA_NONE); | ||
752 | if (is_swiotlb_buffer(dma_addr)) | ||
753 | sync_single(hwdev, dma_addr, size, dir, target); | ||
754 | else if (dir == DMA_FROM_DEVICE) | ||
755 | dma_mark_clean(dma_addr, size); | ||
756 | } | 771 | } |
757 | 772 | ||
758 | void | 773 | void |
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | |||
777 | 792 | ||
778 | /* | 793 | /* |
779 | * Map a set of buffers described by scatterlist in streaming mode for DMA. | 794 | * Map a set of buffers described by scatterlist in streaming mode for DMA. |
780 | * This is the scatter-gather version of the above swiotlb_map_single | 795 | * This is the scatter-gather version of the above swiotlb_map_page |
781 | * interface. Here the scatter gather list elements are each tagged with the | 796 | * interface. Here the scatter gather list elements are each tagged with the |
782 | * appropriate dma address and length. They are obtained via | 797 | * appropriate dma address and length. They are obtained via |
783 | * sg_dma_{address,length}(SG). | 798 | * sg_dma_{address,length}(SG). |
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | |||
788 | * The routine returns the number of addr/length pairs actually | 803 | * The routine returns the number of addr/length pairs actually |
789 | * used, at most nents. | 804 | * used, at most nents. |
790 | * | 805 | * |
791 | * Device ownership issues as mentioned above for swiotlb_map_single are the | 806 | * Device ownership issues as mentioned above for swiotlb_map_page are the |
792 | * same here. | 807 | * same here. |
793 | */ | 808 | */ |
794 | int | 809 | int |
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); | |||
836 | 851 | ||
837 | /* | 852 | /* |
838 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules | 853 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules |
839 | * concerning calls here are the same as for swiotlb_unmap_single() above. | 854 | * concerning calls here are the same as for swiotlb_unmap_page() above. |
840 | */ | 855 | */ |
841 | void | 856 | void |
842 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 857 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | |||
847 | 862 | ||
848 | BUG_ON(dir == DMA_NONE); | 863 | BUG_ON(dir == DMA_NONE); |
849 | 864 | ||
850 | for_each_sg(sgl, sg, nelems, i) { | 865 | for_each_sg(sgl, sg, nelems, i) |
851 | if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) | 866 | unmap_single(hwdev, sg->dma_address, sg->dma_length, dir); |
852 | unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), | 867 | |
853 | sg->dma_length, dir); | ||
854 | else if (dir == DMA_FROM_DEVICE) | ||
855 | dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); | ||
856 | } | ||
857 | } | 868 | } |
858 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | 869 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); |
859 | 870 | ||
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, | |||
879 | struct scatterlist *sg; | 890 | struct scatterlist *sg; |
880 | int i; | 891 | int i; |
881 | 892 | ||
882 | BUG_ON(dir == DMA_NONE); | 893 | for_each_sg(sgl, sg, nelems, i) |
883 | 894 | swiotlb_sync_single(hwdev, sg->dma_address, | |
884 | for_each_sg(sgl, sg, nelems, i) { | ||
885 | if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) | ||
886 | sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), | ||
887 | sg->dma_length, dir, target); | 895 | sg->dma_length, dir, target); |
888 | else if (dir == DMA_FROM_DEVICE) | ||
889 | dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); | ||
890 | } | ||
891 | } | 896 | } |
892 | 897 | ||
893 | void | 898 | void |
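The swiotlb rework routes every caller through one unmap_single() that takes the bus address and owns the is_swiotlb_buffer()/dma_mark_clean() decision, instead of each call site repeating it. A sketch of that shape; everything below is a stand-in, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

static char io_tlb[4096];		/* stand-in bounce pool */

static bool is_swiotlb_buffer(const char *addr)
{
	return addr >= io_tlb && addr < io_tlb + sizeof(io_tlb);
}

static void unmap_single(char *addr, unsigned int size, bool from_device)
{
	if (is_swiotlb_buffer(addr)) {
		printf("bounce: copy back %u bytes, free slot\n", size);
		return;
	}
	if (!from_device)
		return;			/* nothing to clean for writes */
	printf("direct: mark %u bytes clean\n", size);
}

int main(void)
{
	char direct[128];

	unmap_single(io_tlb + 16, 64, true);	/* bounce-buffer path */
	unmap_single(direct, 64, true);		/* direct-mapped path */
	return 0;
}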
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 7536acea135b..756ccafa9cec 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -408,6 +408,8 @@ enum format_type { | |||
408 | FORMAT_TYPE_LONG_LONG, | 408 | FORMAT_TYPE_LONG_LONG, |
409 | FORMAT_TYPE_ULONG, | 409 | FORMAT_TYPE_ULONG, |
410 | FORMAT_TYPE_LONG, | 410 | FORMAT_TYPE_LONG, |
411 | FORMAT_TYPE_UBYTE, | ||
412 | FORMAT_TYPE_BYTE, | ||
411 | FORMAT_TYPE_USHORT, | 413 | FORMAT_TYPE_USHORT, |
412 | FORMAT_TYPE_SHORT, | 414 | FORMAT_TYPE_SHORT, |
413 | FORMAT_TYPE_UINT, | 415 | FORMAT_TYPE_UINT, |
@@ -573,12 +575,15 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec) | |||
573 | } | 575 | } |
574 | 576 | ||
575 | static char *symbol_string(char *buf, char *end, void *ptr, | 577 | static char *symbol_string(char *buf, char *end, void *ptr, |
576 | struct printf_spec spec) | 578 | struct printf_spec spec, char ext) |
577 | { | 579 | { |
578 | unsigned long value = (unsigned long) ptr; | 580 | unsigned long value = (unsigned long) ptr; |
579 | #ifdef CONFIG_KALLSYMS | 581 | #ifdef CONFIG_KALLSYMS |
580 | char sym[KSYM_SYMBOL_LEN]; | 582 | char sym[KSYM_SYMBOL_LEN]; |
581 | sprint_symbol(sym, value); | 583 | if (ext != 'f') |
584 | sprint_symbol(sym, value); | ||
585 | else | ||
586 | kallsyms_lookup(value, NULL, NULL, NULL, sym); | ||
582 | return string(buf, end, sym, spec); | 587 | return string(buf, end, sym, spec); |
583 | #else | 588 | #else |
584 | spec.field_width = 2*sizeof(void *); | 589 | spec.field_width = 2*sizeof(void *); |
@@ -690,7 +695,8 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, | |||
690 | * | 695 | * |
691 | * Right now we handle: | 696 | * Right now we handle: |
692 | * | 697 | * |
693 | * - 'F' For symbolic function descriptor pointers | 698 | * - 'F' For symbolic function descriptor pointers with offset |
699 | * - 'f' For simple symbolic function names without offset | ||
694 | * - 'S' For symbolic direct pointers | 700 | * - 'S' For symbolic direct pointers |
695 | * - 'R' For a struct resource pointer, it prints the range of | 701 | * - 'R' For a struct resource pointer, it prints the range of |
696 | * addresses (not the name nor the flags) | 702 | * addresses (not the name nor the flags) |
@@ -713,10 +719,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
713 | 719 | ||
714 | switch (*fmt) { | 720 | switch (*fmt) { |
715 | case 'F': | 721 | case 'F': |
722 | case 'f': | ||
716 | ptr = dereference_function_descriptor(ptr); | 723 | ptr = dereference_function_descriptor(ptr); |
717 | /* Fallthrough */ | 724 | /* Fallthrough */ |
718 | case 'S': | 725 | case 'S': |
719 | return symbol_string(buf, end, ptr, spec); | 726 | return symbol_string(buf, end, ptr, spec, *fmt); |
720 | case 'R': | 727 | case 'R': |
721 | return resource_string(buf, end, ptr, spec); | 728 | return resource_string(buf, end, ptr, spec); |
722 | case 'm': | 729 | case 'm': |
@@ -853,11 +860,15 @@ qualifier: | |||
853 | spec->qualifier = -1; | 860 | spec->qualifier = -1; |
854 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | 861 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || |
855 | *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { | 862 | *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { |
856 | spec->qualifier = *fmt; | 863 | spec->qualifier = *fmt++; |
857 | ++fmt; | 864 | if (unlikely(spec->qualifier == *fmt)) { |
858 | if (spec->qualifier == 'l' && *fmt == 'l') { | 865 | if (spec->qualifier == 'l') { |
859 | spec->qualifier = 'L'; | 866 | spec->qualifier = 'L'; |
860 | ++fmt; | 867 | ++fmt; |
868 | } else if (spec->qualifier == 'h') { | ||
869 | spec->qualifier = 'H'; | ||
870 | ++fmt; | ||
871 | } | ||
861 | } | 872 | } |
862 | } | 873 | } |
863 | 874 | ||
@@ -919,6 +930,11 @@ qualifier: | |||
919 | spec->type = FORMAT_TYPE_SIZE_T; | 930 | spec->type = FORMAT_TYPE_SIZE_T; |
920 | } else if (spec->qualifier == 't') { | 931 | } else if (spec->qualifier == 't') { |
921 | spec->type = FORMAT_TYPE_PTRDIFF; | 932 | spec->type = FORMAT_TYPE_PTRDIFF; |
933 | } else if (spec->qualifier == 'H') { | ||
934 | if (spec->flags & SIGN) | ||
935 | spec->type = FORMAT_TYPE_BYTE; | ||
936 | else | ||
937 | spec->type = FORMAT_TYPE_UBYTE; | ||
922 | } else if (spec->qualifier == 'h') { | 938 | } else if (spec->qualifier == 'h') { |
923 | if (spec->flags & SIGN) | 939 | if (spec->flags & SIGN) |
924 | spec->type = FORMAT_TYPE_SHORT; | 940 | spec->type = FORMAT_TYPE_SHORT; |
@@ -943,7 +959,8 @@ qualifier: | |||
943 | * | 959 | * |
944 | * This function follows C99 vsnprintf, but has some extensions: | 960 | * This function follows C99 vsnprintf, but has some extensions: |
945 | * %pS output the name of a text symbol | 961 | * %pS output the name of a text symbol |
946 | * %pF output the name of a function pointer | 962 | * %pF output the name of a function pointer with its offset |
963 | * %pf output the name of a function pointer without its offset | ||
947 | * %pR output the address range in a struct resource | 964 | * %pR output the address range in a struct resource |
948 | * | 965 | * |
949 | * The return value is the number of characters which would | 966 | * The return value is the number of characters which would |
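A hypothetical module showing the two specifiers side by side after this change (kernel build with CONFIG_KALLSYMS assumed; the printed offset and size are illustrative):

#include <linux/kernel.h>
#include <linux/module.h>

static void demo_fn(void)
{
}

static int __init demo_init(void)
{
	/* %pF prints something like "demo_fn+0x0/0x8",
	 * %pf prints just "demo_fn". */
	pr_info("with offset:    %pF\n", demo_fn);
	pr_info("without offset: %pf\n", demo_fn);
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");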
@@ -1087,6 +1104,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
1087 | case FORMAT_TYPE_PTRDIFF: | 1104 | case FORMAT_TYPE_PTRDIFF: |
1088 | num = va_arg(args, ptrdiff_t); | 1105 | num = va_arg(args, ptrdiff_t); |
1089 | break; | 1106 | break; |
1107 | case FORMAT_TYPE_UBYTE: | ||
1108 | num = (unsigned char) va_arg(args, int); | ||
1109 | break; | ||
1110 | case FORMAT_TYPE_BYTE: | ||
1111 | num = (signed char) va_arg(args, int); | ||
1112 | break; | ||
1090 | case FORMAT_TYPE_USHORT: | 1113 | case FORMAT_TYPE_USHORT: |
1091 | num = (unsigned short) va_arg(args, int); | 1114 | num = (unsigned short) va_arg(args, int); |
1092 | break; | 1115 | break; |
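The new 'hh' qualifier (tracked internally as 'H') gives the kernel the same byte semantics C99 defines for printf: the promoted int argument is truncated back to signed or unsigned char before formatting. The equivalent in userspace printf terms:

#include <stdio.h>

int main(void)
{
	int v = 300;			/* does not fit in 8 bits */

	printf("%hhd\n", v);		/* 300 -> 44 as signed char */
	printf("%hhu\n", -1);		/* -1 -> 255 as unsigned char */
	return 0;
}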
@@ -1363,6 +1386,10 @@ do { \ | |||
1363 | case FORMAT_TYPE_PTRDIFF: | 1386 | case FORMAT_TYPE_PTRDIFF: |
1364 | save_arg(ptrdiff_t); | 1387 | save_arg(ptrdiff_t); |
1365 | break; | 1388 | break; |
1389 | case FORMAT_TYPE_UBYTE: | ||
1390 | case FORMAT_TYPE_BYTE: | ||
1391 | save_arg(char); | ||
1392 | break; | ||
1366 | case FORMAT_TYPE_USHORT: | 1393 | case FORMAT_TYPE_USHORT: |
1367 | case FORMAT_TYPE_SHORT: | 1394 | case FORMAT_TYPE_SHORT: |
1368 | save_arg(short); | 1395 | save_arg(short); |
@@ -1391,7 +1418,8 @@ EXPORT_SYMBOL_GPL(vbin_printf); | |||
1391 | * | 1418 | * |
1392 | * The format follows C99 vsnprintf, but has some extensions: | 1419 | * The format follows C99 vsnprintf, but has some extensions: |
1393 | * %pS output the name of a text symbol | 1420 | * %pS output the name of a text symbol |
1394 | * %pF output the name of a function pointer | 1421 | * %pF output the name of a function pointer with its offset |
1422 | * %pf output the name of a function pointer without its offset | ||
1395 | * %pR output the address range in a struct resource | 1423 | * %pR output the address range in a struct resource |
1396 | * %n is ignored | 1424 | * %n is ignored |
1397 | * | 1425 | * |
@@ -1538,6 +1566,12 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
1538 | case FORMAT_TYPE_PTRDIFF: | 1566 | case FORMAT_TYPE_PTRDIFF: |
1539 | num = get_arg(ptrdiff_t); | 1567 | num = get_arg(ptrdiff_t); |
1540 | break; | 1568 | break; |
1569 | case FORMAT_TYPE_UBYTE: | ||
1570 | num = get_arg(unsigned char); | ||
1571 | break; | ||
1572 | case FORMAT_TYPE_BYTE: | ||
1573 | num = get_arg(signed char); | ||
1574 | break; | ||
1541 | case FORMAT_TYPE_USHORT: | 1575 | case FORMAT_TYPE_USHORT: |
1542 | num = get_arg(unsigned short); | 1576 | num = get_arg(unsigned short); |
1543 | break; | 1577 | break; |