Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          |    3
-rw-r--r--  lib/Kconfig.debug    |   62
-rw-r--r--  lib/Makefile         |    2
-rw-r--r--  lib/atomic64_test.c  |    2
-rw-r--r--  lib/devres.c         |    2
-rw-r--r--  lib/dynamic_debug.c  |    2
-rw-r--r--  lib/genalloc.c       |    1
-rw-r--r--  lib/idr.c            |    4
-rw-r--r--  lib/lmb.c            |  541
-rw-r--r--  lib/random32.c       |    2
-rw-r--r--  lib/rbtree.c         |  116
-rw-r--r--  lib/swiotlb.c        |  137
-rw-r--r--  lib/vsprintf.c       |    9
13 files changed, 211 insertions(+), 672 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 170d8ca901d8..5b916bc0fbae 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -181,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
-config HAVE_LMB
-	boolean
-
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e722e9d62221..ff87ddc4cbd5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -152,28 +152,33 @@ config DEBUG_SHIRQ
 	  Drivers ought to be able to handle interrupts coming in at those
 	  points; some don't and need to be caught.
 
-config DETECT_SOFTLOCKUP
-	bool "Detect Soft Lockups"
+config LOCKUP_DETECTOR
+	bool "Detect Hard and Soft Lockups"
 	depends on DEBUG_KERNEL && !S390
-	default y
 	help
-	  Say Y here to enable the kernel to detect "soft lockups",
-	  which are bugs that cause the kernel to loop in kernel
+	  Say Y here to enable the kernel to act as a watchdog to detect
+	  hard and soft lockups.
+
+	  Softlockups are bugs that cause the kernel to loop in kernel
 	  mode for more than 60 seconds, without giving other tasks a
-	  chance to run.
+	  chance to run. The current stack trace is displayed upon
+	  detection and the system will stay locked up.
 
-	  When a soft-lockup is detected, the kernel will print the
-	  current stack trace (which you should report), but the
-	  system will stay locked up. This feature has negligible
-	  overhead.
+	  Hardlockups are bugs that cause the CPU to loop in kernel mode
+	  for more than 60 seconds, without letting other interrupts have a
+	  chance to run. The current stack trace is displayed upon detection
+	  and the system will stay locked up.
 
-	  (Note that "hard lockups" are separate type of bugs that
-	  can be detected via the NMI-watchdog, on platforms that
-	  support it.)
+	  The overhead should be minimal. A periodic hrtimer runs to
+	  generate interrupts and kick the watchdog task every 10-12 seconds.
+	  An NMI is generated every 60 seconds or so to check for hardlockups.
+
+config HARDLOCKUP_DETECTOR
+	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
 config BOOTPARAM_SOFTLOCKUP_PANIC
 	bool "Panic (Reboot) On Soft Lockups"
-	depends on DETECT_SOFTLOCKUP
+	depends on LOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -190,7 +195,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
 
 config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 	int
-	depends on DETECT_SOFTLOCKUP
+	depends on LOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -307,6 +312,12 @@ config DEBUG_OBJECTS_WORK
 	  work queue routines to track the life time of work objects and
 	  validate the work operations.
 
+config DEBUG_OBJECTS_RCU_HEAD
+	bool "Debug RCU callbacks objects"
+	depends on DEBUG_OBJECTS && PREEMPT
+	help
+	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -528,7 +539,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -628,6 +639,19 @@ config DEBUG_INFO
 
 	  If unsure, say N.
 
+config DEBUG_INFO_REDUCED
+	bool "Reduce debugging information"
+	depends on DEBUG_INFO
+	help
+	  If you say Y here gcc is instructed to generate less debugging
+	  information for structure types. This means that tools that
+	  need full debugging information (like kgdb or systemtap) won't
+	  be happy. But if you merely need debugging information to
+	  resolve line numbers there is no loss. Advantage is that
+	  build directory object sizes shrink dramatically over a full
+	  DEBUG_INFO build and compile times are reduced too.
+	  Only works with newer gcc versions.
+
 config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
@@ -937,7 +961,7 @@ config FAIL_MAKE_REQUEST
 	  Provide fault-injection capability for disk IO.
 
 config FAIL_IO_TIMEOUT
-	bool "Faul-injection capability for faking disk interrupts"
+	bool "Fault-injection capability for faking disk interrupts"
 	depends on FAULT_INJECTION && BLOCK
 	help
 	  Provide fault-injection capability on end IO handling. This
@@ -958,13 +982,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
diff --git a/lib/Makefile b/lib/Makefile
index 3f1062cbbff4..0bfabba1bb32 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -89,8 +89,6 @@ obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 250ed11d3ed2..44524cc8c32a 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -114,7 +114,7 @@ static __init int test_atomic64(void)
 	BUG_ON(v.counter != r);
 
 #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
-	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H)
+	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
 	INIT(onestwos);
 	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
 	r -= one;
diff --git a/lib/devres.c b/lib/devres.c
index 49368608f988..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  * @pdev: PCI device to map IO resources for
  * @mask: Mask of BARs to unmap and release
  *
- * Unamp and release regions specified by @mask.
+ * Unmap and release regions specified by @mask.
  */
 void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
 {
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3df8eb17a607..02afc2533728 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -692,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt)
  * Called in response to a module being unloaded. Removes
  * any ddebug_table's which point at the module.
  */
-int ddebug_remove_module(char *mod_name)
+int ddebug_remove_module(const char *mod_name)
 {
 	struct ddebug_table *dt, *nextdt;
 	int ret = -ENOENT;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 736c3b06398e..1923f1490e72 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -128,7 +128,6 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-		end_bit -= nbits + 1;
 
 		spin_lock_irqsave(&chunk->lock, flags);
 		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
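
The deleted adjustment shrank the bitmap search window by nbits + 1 bits, which could make allocations fail near the end of a chunk even when free space remained; end_bit now covers the whole chunk. For context, a minimal sketch (not part of this commit) of the gen_pool API that this path serves; the chunk_start address passed in is hypothetical:

	#include <linux/genalloc.h>

	static int genalloc_example(unsigned long chunk_start)
	{
		struct gen_pool *pool;
		unsigned long addr;

		pool = gen_pool_create(4, -1);	/* 16-byte granules, any NUMA node */
		if (!pool)
			return -ENOMEM;
		/* manage 4 KiB of memory the caller already owns */
		if (gen_pool_add(pool, chunk_start, 4096, -1)) {
			gen_pool_destroy(pool);
			return -ENOMEM;
		}
		addr = gen_pool_alloc(pool, 64);	/* exercises the fixed search */
		if (addr)
			gen_pool_free(pool, addr, 64);
		gen_pool_destroy(pool);
		return 0;
	}
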
diff --git a/lib/idr.c b/lib/idr.c
index c1a206901761..7f1a4f0acf50 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -602,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	/* find first ent */
 	n = idp->layers * IDR_BITS;
 	max = 1 << n;
-	p = rcu_dereference(idp->top);
+	p = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 
@@ -610,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	while (n > 0 && p) {
 		n -= IDR_BITS;
 		*paa++ = p;
-		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 	}
 
 	if (p) {
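
For context: rcu_dereference() complains under CONFIG_PROVE_RCU when called outside an RCU read-side critical section, while rcu_dereference_raw() is the variant for callers that guarantee protection by other means, presumably the case for idr_get_next() callers holding their own locks. A sketch of the two patterns, with a hypothetical shared pointer and lock:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct foo {
		int val;
	};

	static struct foo *foo_head;		/* hypothetical shared pointer */
	static DEFINE_SPINLOCK(foo_lock);	/* hypothetical update-side lock */

	static int read_checked(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock();
		p = rcu_dereference(foo_head);	/* lockdep-checked access */
		if (p)
			val = p->val;
		rcu_read_unlock();
		return val;
	}

	static int read_raw(void)
	{
		struct foo *p;
		int val = -1;

		spin_lock(&foo_lock);			/* protection comes from the lock */
		p = rcu_dereference_raw(foo_head);	/* no lockdep check, by design */
		if (p)
			val = p->val;
		spin_unlock(&foo_lock);
		return val;
	}
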
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index b1fc52606524..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Procedures for maintaining information about logical memory blocks.
- *
- * Peter Bergner, IBM Corp. June 2001.
- * Copyright (C) 2001 Peter Bergner.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/lmb.h>
-
-#define LMB_ALLOC_ANYWHERE	0
-
-struct lmb lmb;
-
-static int lmb_debug;
-
-static int __init early_lmb(char *p)
-{
-	if (p && strstr(p, "debug"))
-		lmb_debug = 1;
-	return 0;
-}
-early_param("lmb", early_lmb);
-
-static void lmb_dump(struct lmb_region *region, char *name)
-{
-	unsigned long long base, size;
-	int i;
-
-	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
-
-	for (i = 0; i < region->cnt; i++) {
-		base = region->region[i].base;
-		size = region->region[i].size;
-
-		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-			name, i, base, base + size - 1, size);
-	}
-}
-
-void lmb_dump_all(void)
-{
-	if (!lmb_debug)
-		return;
-
-	pr_info("LMB configuration:\n");
-	pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
-	pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
-
-	lmb_dump(&lmb.memory, "memory");
-	lmb_dump(&lmb.reserved, "reserved");
-}
-
-static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
-					u64 size2)
-{
-	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
-}
-
-static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
-{
-	if (base2 == base1 + size1)
-		return 1;
-	else if (base1 == base2 + size2)
-		return -1;
-
-	return 0;
-}
-
-static long lmb_regions_adjacent(struct lmb_region *rgn,
-		unsigned long r1, unsigned long r2)
-{
-	u64 base1 = rgn->region[r1].base;
-	u64 size1 = rgn->region[r1].size;
-	u64 base2 = rgn->region[r2].base;
-	u64 size2 = rgn->region[r2].size;
-
-	return lmb_addrs_adjacent(base1, size1, base2, size2);
-}
-
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
-{
-	unsigned long i;
-
-	for (i = r; i < rgn->cnt - 1; i++) {
-		rgn->region[i].base = rgn->region[i + 1].base;
-		rgn->region[i].size = rgn->region[i + 1].size;
-	}
-	rgn->cnt--;
-}
-
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn,
-		unsigned long r1, unsigned long r2)
-{
-	rgn->region[r1].size += rgn->region[r2].size;
-	lmb_remove_region(rgn, r2);
-}
-
-void __init lmb_init(void)
-{
-	/* Create a dummy zero size LMB which will get coalesced away later.
-	 * This simplifies the lmb_add() code below...
-	 */
-	lmb.memory.region[0].base = 0;
-	lmb.memory.region[0].size = 0;
-	lmb.memory.cnt = 1;
-
-	/* Ditto. */
-	lmb.reserved.region[0].base = 0;
-	lmb.reserved.region[0].size = 0;
-	lmb.reserved.cnt = 1;
-}
-
-void __init lmb_analyze(void)
-{
-	int i;
-
-	lmb.memory.size = 0;
-
-	for (i = 0; i < lmb.memory.cnt; i++)
-		lmb.memory.size += lmb.memory.region[i].size;
-}
-
-static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-	unsigned long coalesced = 0;
-	long adjacent, i;
-
-	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-		return 0;
-	}
-
-	/* First try and coalesce this LMB with another. */
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
-
-		if ((rgnbase == base) && (rgnsize == size))
-			/* Already have this region, so we're done */
-			return 0;
-
-		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
-		if (adjacent > 0) {
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
-			coalesced++;
-			break;
-		} else if (adjacent < 0) {
-			rgn->region[i].size += size;
-			coalesced++;
-			break;
-		}
-	}
-
-	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
-		lmb_coalesce_regions(rgn, i, i+1);
-		coalesced++;
-	}
-
-	if (coalesced)
-		return coalesced;
-	if (rgn->cnt >= MAX_LMB_REGIONS)
-		return -1;
-
-	/* Couldn't coalesce the LMB, so add it to the sorted table. */
-	for (i = rgn->cnt - 1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i+1].base = rgn->region[i].base;
-			rgn->region[i+1].size = rgn->region[i].size;
-		} else {
-			rgn->region[i+1].base = base;
-			rgn->region[i+1].size = size;
-			break;
-		}
-	}
-
-	if (base < rgn->region[0].base) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-	}
-	rgn->cnt++;
-
-	return 0;
-}
-
-long lmb_add(u64 base, u64 size)
-{
-	struct lmb_region *_rgn = &lmb.memory;
-
-	/* On pSeries LPAR systems, the first LMB is our RMO region. */
-	if (base == 0)
-		lmb.rmo_size = size;
-
-	return lmb_add_region(_rgn, base, size);
-
-}
-
-static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
-{
-	u64 rgnbegin, rgnend;
-	u64 end = base + size;
-	int i;
-
-	rgnbegin = rgnend = 0; /* supress gcc warnings */
-
-	/* Find the region where (base, size) belongs to */
-	for (i=0; i < rgn->cnt; i++) {
-		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size;
-
-		if ((rgnbegin <= base) && (end <= rgnend))
-			break;
-	}
-
-	/* Didn't find the region */
-	if (i == rgn->cnt)
-		return -1;
-
-	/* Check to see if we are removing entire region */
-	if ((rgnbegin == base) && (rgnend == end)) {
-		lmb_remove_region(rgn, i);
-		return 0;
-	}
-
-	/* Check to see if region is matching at the front */
-	if (rgnbegin == base) {
-		rgn->region[i].base = end;
-		rgn->region[i].size -= size;
-		return 0;
-	}
-
-	/* Check to see if the region is matching at the end */
-	if (rgnend == end) {
-		rgn->region[i].size -= size;
-		return 0;
-	}
-
-	/*
-	 * We need to split the entry - adjust the current one to the
-	 * beginging of the hole and add the region after hole.
-	 */
-	rgn->region[i].size = base - rgn->region[i].base;
-	return lmb_add_region(rgn, end, rgnend - end);
-}
-
-long lmb_remove(u64 base, u64 size)
-{
-	return __lmb_remove(&lmb.memory, base, size);
-}
-
-long __init lmb_free(u64 base, u64 size)
-{
-	return __lmb_remove(&lmb.reserved, base, size);
-}
-
-long __init lmb_reserve(u64 base, u64 size)
-{
-	struct lmb_region *_rgn = &lmb.reserved;
-
-	BUG_ON(0 == size);
-
-	return lmb_add_region(_rgn, base, size);
-}
-
-long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
-{
-	unsigned long i;
-
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
-		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
-			break;
-	}
-
-	return (i < rgn->cnt) ? i : -1;
-}
-
-static u64 lmb_align_down(u64 addr, u64 size)
-{
-	return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
-static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
-					   u64 size, u64 align)
-{
-	u64 base, res_base;
-	long j;
-
-	base = lmb_align_down((end - size), align);
-	while (start <= base) {
-		j = lmb_overlaps_region(&lmb.reserved, base, size);
-		if (j < 0) {
-			/* this area isn't reserved, take it */
-			if (lmb_add_region(&lmb.reserved, base, size) < 0)
-				base = ~(u64)0;
-			return base;
-		}
-		res_base = lmb.reserved.region[j].base;
-		if (res_base < size)
-			break;
-		base = lmb_align_down(res_base - size, align);
-	}
-
-	return ~(u64)0;
-}
-
-static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
-				       u64 (*nid_range)(u64, u64, int *),
-				       u64 size, u64 align, int nid)
-{
-	u64 start, end;
-
-	start = mp->base;
-	end = start + mp->size;
-
-	start = lmb_align_up(start, align);
-	while (start < end) {
-		u64 this_end;
-		int this_nid;
-
-		this_end = nid_range(start, end, &this_nid);
-		if (this_nid == nid) {
-			u64 ret = lmb_alloc_nid_unreserved(start, this_end,
-							   size, align);
-			if (ret != ~(u64)0)
-				return ret;
-		}
-		start = this_end;
-	}
-
-	return ~(u64)0;
-}
-
-u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
-			 u64 (*nid_range)(u64 start, u64 end, int *nid))
-{
-	struct lmb_region *mem = &lmb.memory;
-	int i;
-
-	BUG_ON(0 == size);
-
-	size = lmb_align_up(size, align);
-
-	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = lmb_alloc_nid_region(&mem->region[i],
-					       nid_range,
-					       size, align, nid);
-		if (ret != ~(u64)0)
-			return ret;
-	}
-
-	return lmb_alloc(size, align);
-}
-
-u64 __init lmb_alloc(u64 size, u64 align)
-{
-	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
-}
-
-u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-	u64 alloc;
-
-	alloc = __lmb_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
-{
-	long i, j;
-	u64 base = 0;
-	u64 res_base;
-
-	BUG_ON(0 == size);
-
-	size = lmb_align_up(size, align);
-
-	/* On some platforms, make sure we allocate lowmem */
-	/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
-	if (max_addr == LMB_ALLOC_ANYWHERE)
-		max_addr = LMB_REAL_LIMIT;
-
-	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
-		u64 lmbbase = lmb.memory.region[i].base;
-		u64 lmbsize = lmb.memory.region[i].size;
-
-		if (lmbsize < size)
-			continue;
-		if (max_addr == LMB_ALLOC_ANYWHERE)
-			base = lmb_align_down(lmbbase + lmbsize - size, align);
-		else if (lmbbase < max_addr) {
-			base = min(lmbbase + lmbsize, max_addr);
-			base = lmb_align_down(base - size, align);
-		} else
-			continue;
-
-		while (base && lmbbase <= base) {
-			j = lmb_overlaps_region(&lmb.reserved, base, size);
-			if (j < 0) {
-				/* this area isn't reserved, take it */
-				if (lmb_add_region(&lmb.reserved, base, size) < 0)
-					return 0;
-				return base;
-			}
-			res_base = lmb.reserved.region[j].base;
-			if (res_base < size)
-				break;
-			base = lmb_align_down(res_base - size, align);
-		}
-	}
-	return 0;
-}
-
-/* You must call lmb_analyze() before this. */
-u64 __init lmb_phys_mem_size(void)
-{
-	return lmb.memory.size;
-}
-
-u64 lmb_end_of_DRAM(void)
-{
-	int idx = lmb.memory.cnt - 1;
-
-	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
-}
-
-/* You must call lmb_analyze() after this. */
-void __init lmb_enforce_memory_limit(u64 memory_limit)
-{
-	unsigned long i;
-	u64 limit;
-	struct lmb_property *p;
-
-	if (!memory_limit)
-		return;
-
-	/* Truncate the lmb regions to satisfy the memory limit. */
-	limit = memory_limit;
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		if (limit > lmb.memory.region[i].size) {
-			limit -= lmb.memory.region[i].size;
-			continue;
-		}
-
-		lmb.memory.region[i].size = limit;
-		lmb.memory.cnt = i + 1;
-		break;
-	}
-
-	if (lmb.memory.region[0].size < lmb.rmo_size)
-		lmb.rmo_size = lmb.memory.region[0].size;
-
-	memory_limit = lmb_end_of_DRAM();
-
-	/* And truncate any reserves above the limit also. */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		p = &lmb.reserved.region[i];
-
-		if (p->base > memory_limit)
-			p->size = 0;
-		else if ((p->base + p->size) > memory_limit)
-			p->size = memory_limit - p->base;
-
-		if (p->size == 0) {
-			lmb_remove_region(&lmb.reserved, i);
-			i--;
-		}
-	}
-}
-
-int __init lmb_is_reserved(u64 addr)
-{
-	int i;
-
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		u64 upper = lmb.reserved.region[i].base +
-			lmb.reserved.region[i].size - 1;
-		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
-			return 1;
-	}
-	return 0;
-}
-
-int lmb_is_region_reserved(u64 base, u64 size)
-{
-	return lmb_overlaps_region(&lmb.reserved, base, size);
-}
-
-/*
- * Given a <base, len>, find which memory regions belong to this range.
- * Adjust the request and return a contiguous chunk.
- */
-int lmb_find(struct lmb_property *res)
-{
-	int i;
-	u64 rstart, rend;
-
-	rstart = res->base;
-	rend = rstart + res->size - 1;
-
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		u64 start = lmb.memory.region[i].base;
-		u64 end = start + lmb.memory.region[i].size - 1;
-
-		if (start > rend)
-			return -1;
-
-		if ((end >= rstart) && (start < rend)) {
-			/* adjust the request */
-			if (rstart < start)
-				rstart = start;
-			if (rend > end)
-				rend = end;
-			res->base = rstart;
-			res->size = rend - rstart + 1;
-			return 0;
-		}
-	}
-	return -1;
-}
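
lib/lmb.c is deleted outright here; the same role was taken over by the memblock API around this time (lmb was renamed to memblock and moved under mm/). A rough mapping of the most common calls, assuming the mm/memblock.c interfaces of that era:

	#include <linux/memblock.h>

	static void __init memblock_example(u64 base, u64 size, u64 align)
	{
		u64 addr;

		memblock_add(base, size);		/* was lmb_add() */
		memblock_reserve(base, size);		/* was lmb_reserve() */
		addr = memblock_alloc(size, align);	/* was lmb_alloc() */
		memblock_free(addr, size);		/* was lmb_free() */
	}
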
diff --git a/lib/random32.c b/lib/random32.c
index 870dc3fc0f0f..fc3545a32771 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -127,7 +127,7 @@ core_initcall(random32_init);
 
 /*
  * Generate better values after random number generator
- * is fully initalized.
+ * is fully initialized.
  */
 static int __init random32_reseed(void)
 {
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 15e10b1afdd2..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -44,11 +44,6 @@ static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
 	else
 		root->rb_node = right;
 	rb_set_parent(node, right);
-
-	if (root->augment_cb) {
-		root->augment_cb(node);
-		root->augment_cb(right);
-	}
 }
 
 static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
@@ -72,20 +67,12 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
 	else
 		root->rb_node = left;
 	rb_set_parent(node, left);
-
-	if (root->augment_cb) {
-		root->augment_cb(node);
-		root->augment_cb(left);
-	}
 }
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
 	struct rb_node *parent, *gparent;
 
-	if (root->augment_cb)
-		root->augment_cb(node);
-
 	while ((parent = rb_parent(node)) && rb_is_red(parent))
 	{
 		gparent = rb_parent(parent);
@@ -240,15 +227,12 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 	else
 	{
 		struct rb_node *old = node, *left;
-		int old_parent_cb = 0;
-		int successor_parent_cb = 0;
 
 		node = node->rb_right;
 		while ((left = node->rb_left) != NULL)
 			node = left;
 
 		if (rb_parent(old)) {
-			old_parent_cb = 1;
 			if (rb_parent(old)->rb_left == old)
 				rb_parent(old)->rb_left = node;
 			else
@@ -263,10 +247,8 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		if (parent == old) {
 			parent = node;
 		} else {
-			successor_parent_cb = 1;
 			if (child)
 				rb_set_parent(child, parent);
-
 			parent->rb_left = child;
 
 			node->rb_right = old->rb_right;
@@ -277,24 +259,6 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 			node->rb_left = old->rb_left;
 			rb_set_parent(old->rb_left, node);
 
-			if (root->augment_cb) {
-				/*
-				 * Here, three different nodes can have new children.
-				 * The parent of the successor node that was selected
-				 * to replace the node to be erased.
-				 * The node that is getting erased and is now replaced
-				 * by its successor.
-				 * The parent of the node getting erased-replaced.
-				 */
-				if (successor_parent_cb)
-					root->augment_cb(parent);
-
-				root->augment_cb(node);
-
-				if (old_parent_cb)
-					root->augment_cb(rb_parent(old));
-			}
-
 			goto color;
 		}
 
@@ -303,19 +267,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 
 	if (child)
 		rb_set_parent(child, parent);
-
-	if (parent) {
+	if (parent)
+	{
 		if (parent->rb_left == node)
 			parent->rb_left = child;
 		else
 			parent->rb_right = child;
-
-		if (root->augment_cb)
-			root->augment_cb(parent);
-
-	} else {
-		root->rb_node = child;
 	}
+	else
+		root->rb_node = child;
 
  color:
 	if (color == RB_BLACK)
@@ -323,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 }
 EXPORT_SYMBOL(rb_erase);
 
+static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
+{
+	struct rb_node *parent;
+
+up:
+	func(node, data);
+	parent = rb_parent(node);
+	if (!parent)
+		return;
+
+	if (node == parent->rb_left && parent->rb_right)
+		func(parent->rb_right, data);
+	else if (parent->rb_left)
+		func(parent->rb_left, data);
+
+	node = parent;
+	goto up;
+}
+
+/*
+ * after inserting @node into the tree, update the tree to account for
+ * both the new entry and any damage done by rebalance
+ */
+void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
+{
+	if (node->rb_left)
+		node = node->rb_left;
+	else if (node->rb_right)
+		node = node->rb_right;
+
+	rb_augment_path(node, func, data);
+}
+
+/*
+ * before removing the node, find the deepest node on the rebalance path
+ * that will still be there after @node gets removed
+ */
+struct rb_node *rb_augment_erase_begin(struct rb_node *node)
+{
+	struct rb_node *deepest;
+
+	if (!node->rb_right && !node->rb_left)
+		deepest = rb_parent(node);
+	else if (!node->rb_right)
+		deepest = node->rb_left;
+	else if (!node->rb_left)
+		deepest = node->rb_right;
+	else {
+		deepest = rb_next(node);
+		if (deepest->rb_right)
+			deepest = deepest->rb_right;
+		else if (rb_parent(deepest) != node)
+			deepest = rb_parent(deepest);
+	}
+
+	return deepest;
+}
+
+/*
+ * after removal, update the tree to account for the removed entry
+ * and any rebalance damage.
+ */
+void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
+{
+	if (node)
+		rb_augment_path(node, func, data);
+}
+
 /*
  * This function returns the first node (in sort order) of the tree.
  */
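
The augment_cb callback embedded in rb_root gives way to explicit helpers that the user calls around plain rb_insert_color()/rb_erase(). A sketch (not from this commit) of keeping a per-node subtree maximum current with the new hooks; the node layout and update function are hypothetical, only the call pattern mirrors the API added above:

	#include <linux/kernel.h>
	#include <linux/rbtree.h>

	struct mynode {
		struct rb_node rb;
		int val;
		int subtree_max;	/* max of val over this subtree */
	};

	static void update_max(struct rb_node *rb, void *data)
	{
		struct mynode *n = rb_entry(rb, struct mynode, rb);
		int m = n->val;

		if (rb->rb_left)
			m = max(m, rb_entry(rb->rb_left, struct mynode, rb)->subtree_max);
		if (rb->rb_right)
			m = max(m, rb_entry(rb->rb_right, struct mynode, rb)->subtree_max);
		n->subtree_max = m;
	}

	static void mynode_insert(struct rb_root *root, struct mynode *n)
	{
		/* ... the usual rb_link_node() walk is elided ... */
		rb_insert_color(&n->rb, root);
		rb_augment_insert(&n->rb, update_max, NULL);
	}

	static void mynode_erase(struct rb_root *root, struct mynode *n)
	{
		struct rb_node *deepest = rb_augment_erase_begin(&n->rb);

		rb_erase(&n->rb, root);
		rb_augment_erase_end(deepest, update_max, NULL);
	}
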
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a009055140ec..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -50,19 +50,11 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -140,28 +132,14 @@ void swiotlb_print_info(void)
 	       (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
-
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+	bytes = nslabs << IO_TLB_SHIFT;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
-	if (!io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
@@ -185,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	swiotlb_print_info();
 }
 
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+	unsigned long bytes;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+}
+
 void __init
 swiotlb_init(int verbose)
 {
@@ -323,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+		    enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ -360,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size,
+			     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -466,12 +469,27 @@ found:
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -509,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-	    int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ -536,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -559,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 	if (!ret) {
 		/*
-		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on map_single(), which
+		 * We are either out of memory or the device can't DMA to
+		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -578,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 		return NULL;
 	}
 	*dma_handle = dev_addr;
@@ -596,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
@@ -680,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+			 size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
@@ -723,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, int dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+					target);
 		return;
 	}
 
@@ -809,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+	       enum dma_data_direction dir)
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -836,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+		 enum dma_data_direction dir)
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -851,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, int dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;
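
The refactor above exports swiotlb_tbl_map_single() and friends so that a backend with its own bounce table can reuse the slot management instead of duplicating map_single()/unmap_single(); the Xen swiotlb work was the stated motivation for changes of this shape. A sketch under that assumption; the backend names and table are hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/swiotlb.h>

	/* bus address of a bounce table the backend set up itself */
	static dma_addr_t backend_tbl_dma;

	static void *backend_map(struct device *dev, phys_addr_t phys,
				 size_t size)
	{
		/* grabs slots from the shared table and bounces @phys */
		return swiotlb_tbl_map_single(dev, backend_tbl_dma, phys,
					      size, DMA_BIDIRECTIONAL);
	}

	static void backend_unmap(struct device *dev, void *vaddr, size_t size)
	{
		/* copies back if needed and releases the slots */
		swiotlb_tbl_unmap_single(dev, vaddr, size, DMA_BIDIRECTIONAL);
	}
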
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b8a2f549ab0e..4ee19d0d3910 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -980,6 +980,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
  *       [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
  *         little endian output byte order is:
  *         [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ * - 'V' For a struct va_format which contains a format string * and va_list *,
+ *       call vsnprintf(->format, *->va_list).
+ *       Implements a "recursive vsnprintf".
+ *       Do not use this feature without some mechanism to verify the
+ *       correctness of the format string and va_list arguments.
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1025,6 +1030,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		break;
 	case 'U':
 		return uuid_string(buf, end, ptr, spec, fmt);
+	case 'V':
+		return buf + vsnprintf(buf, end - buf,
+				       ((struct va_format *)ptr)->fmt,
+				       *(((struct va_format *)ptr)->va));
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
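
The new %pV specifier lets one printk forward another caller's format string and arguments without an intermediate buffer. The canonical pattern, wrapped here in a hypothetical helper:

	#include <linux/kernel.h>

	static void my_dev_printk(const char *level, const char *name,
				  const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk("%s%s: %pV", level, name, &vaf);	/* recurses once */
		va_end(args);
	}
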