Diffstat (limited to 'lib')
 lib/Kconfig.debug  |  12
 lib/Makefile       |   6
 lib/asn1_decoder.c |   2
 lib/bitmap.c       |   2
 lib/cpumask.c      |   2
 lib/earlycpio.c    | 145
 lib/mpi/longlong.h |  19
 lib/swiotlb.c      | 269
 8 files changed, 313 insertions(+), 144 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28e9d6c98941..e458782f3c52 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -972,7 +972,7 @@ config RCU_CPU_STALL_TIMEOUT
972 int "RCU CPU stall timeout in seconds" 972 int "RCU CPU stall timeout in seconds"
973 depends on TREE_RCU || TREE_PREEMPT_RCU 973 depends on TREE_RCU || TREE_PREEMPT_RCU
974 range 3 300 974 range 3 300
975 default 60 975 default 21
976 help 976 help
977 If a given RCU grace period extends more than the specified 977 If a given RCU grace period extends more than the specified
978 number of seconds, a CPU stall warning is printed. If the 978 number of seconds, a CPU stall warning is printed. If the
@@ -1115,7 +1115,7 @@ config NOTIFIER_ERROR_INJECTION
1115 depends on DEBUG_KERNEL 1115 depends on DEBUG_KERNEL
1116 select DEBUG_FS 1116 select DEBUG_FS
1117 help 1117 help
1118 This option provides the ability to inject artifical errors to 1118 This option provides the ability to inject artificial errors to
1119 specified notifier chain callbacks. It is useful to test the error 1119 specified notifier chain callbacks. It is useful to test the error
1120 handling of notifier call chain failures. 1120 handling of notifier call chain failures.
1121 1121
@@ -1126,7 +1126,7 @@ config CPU_NOTIFIER_ERROR_INJECT
1126 depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION 1126 depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
1127 help 1127 help
1128 This option provides a kernel module that can be used to test 1128 This option provides a kernel module that can be used to test
1129 the error handling of the cpu notifiers by injecting artifical 1129 the error handling of the cpu notifiers by injecting artificial
1130 errors to CPU notifier chain callbacks. It is controlled through 1130 errors to CPU notifier chain callbacks. It is controlled through
1131 debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu 1131 debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
1132 1132
@@ -1150,7 +1150,7 @@ config PM_NOTIFIER_ERROR_INJECT
1150 depends on PM && NOTIFIER_ERROR_INJECTION 1150 depends on PM && NOTIFIER_ERROR_INJECTION
1151 default m if PM_DEBUG 1151 default m if PM_DEBUG
1152 help 1152 help
1153 This option provides the ability to inject artifical errors to 1153 This option provides the ability to inject artificial errors to
1154 PM notifier chain callbacks. It is controlled through debugfs 1154 PM notifier chain callbacks. It is controlled through debugfs
1155 interface /sys/kernel/debug/notifier-error-inject/pm 1155 interface /sys/kernel/debug/notifier-error-inject/pm
1156 1156
@@ -1173,7 +1173,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
1173 tristate "Memory hotplug notifier error injection module" 1173 tristate "Memory hotplug notifier error injection module"
1174 depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION 1174 depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
1175 help 1175 help
1176 This option provides the ability to inject artifical errors to 1176 This option provides the ability to inject artificial errors to
1177 memory hotplug notifier chain callbacks. It is controlled through 1177 memory hotplug notifier chain callbacks. It is controlled through
1178 debugfs interface under /sys/kernel/debug/notifier-error-inject/memory 1178 debugfs interface under /sys/kernel/debug/notifier-error-inject/memory
1179 1179
@@ -1196,7 +1196,7 @@ config PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT
1196 tristate "pSeries reconfig notifier error injection module" 1196 tristate "pSeries reconfig notifier error injection module"
1197 depends on PPC_PSERIES && NOTIFIER_ERROR_INJECTION 1197 depends on PPC_PSERIES && NOTIFIER_ERROR_INJECTION
1198 help 1198 help
1199 This option provides the ability to inject artifical errors to 1199 This option provides the ability to inject artificial errors to
1200 pSeries reconfig notifier chain callbacks. It is controlled 1200 pSeries reconfig notifier chain callbacks. It is controlled
1201 through debugfs interface under 1201 through debugfs interface under
1202 /sys/kernel/debug/notifier-error-inject/pSeries-reconfig/ 1202 /sys/kernel/debug/notifier-error-inject/pSeries-reconfig/
diff --git a/lib/Makefile b/lib/Makefile
index 821a16229111..e2152fa7ff4d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o int_sqrt.o extable.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o
+	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
+	 earlycpio.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -31,7 +32,6 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
-lib-$(CONFIG_HOTPLUG) += kobject_uevent.o
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -163,7 +163,7 @@ $(obj)/crc32table.h: $(obj)/gen_crc32table
 #
 obj-$(CONFIG_OID_REGISTRY) += oid_registry.o
 
-$(obj)/oid_registry.c: $(obj)/oid_registry_data.c
+$(obj)/oid_registry.o: $(obj)/oid_registry_data.c
 
 $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
 			    $(src)/build_OID_registry
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index de2c8b5a715b..5293d2433029 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -91,7 +91,7 @@ next_tag:
 
 	/* Extract the length */
 	len = data[dp++];
-	if (len < 0x7f) {
+	if (len <= 0x7f) {
 		dp += len;
 		goto next_tag;
 	}
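For context on the one-character fix above: in BER/DER, a length octet with the top bit clear (0x00..0x7f) is a short-form definite length, so a value of exactly 0x7f is still a one-octet length that should be skipped directly; the old strict `<` wrongly sent 0x7f down the long-form path. The standalone sketch below restates the BER length rule; ber_length() is a hypothetical helper for illustration, not the kernel's decoder.

#include <stddef.h>

/* Decode a BER length field at p; returns the content length and sets
 * *hdr to the number of length octets consumed, or returns -1 on
 * indefinite or malformed lengths. */
static long ber_length(const unsigned char *p, size_t avail, size_t *hdr)
{
	unsigned char b;
	unsigned int n, i;
	long len = 0;

	if (avail < 1)
		return -1;
	b = p[0];
	if (b <= 0x7f) {	/* short form: the octet itself is the length */
		*hdr = 1;
		return b;
	}
	if (b == 0x80)		/* indefinite form: not handled here */
		return -1;
	n = b & 0x7f;		/* long form: count of length octets */
	if (n >= avail || n > sizeof(long) - 1)
		return -1;
	for (i = 1; i <= n; i++)
		len = (len << 8) | p[i];
	*hdr = n + 1;
	return len;
}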
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06fdfa1aeba7..06f7e4fe8d2d 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -353,7 +353,7 @@ again:
 EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 /*
- * Bitmap printing & parsing functions: first version by Bill Irwin,
+ * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
  * second version by Paul Jackson, third by Joe Korty.
  */
 
359 359
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 402a54ac35cb..d327b87c99b7 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -161,6 +161,6 @@ EXPORT_SYMBOL(free_cpumask_var);
  */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
-	free_bootmem((unsigned long)mask, cpumask_size());
+	free_bootmem(__pa(mask), cpumask_size());
 }
 #endif
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
new file mode 100644
index 000000000000..8078ef49cb79
--- /dev/null
+++ b/lib/earlycpio.c
@@ -0,0 +1,145 @@
+/* ----------------------------------------------------------------------- *
+ *
+ *   Copyright 2012 Intel Corporation; author H. Peter Anvin
+ *
+ *   This file is part of the Linux kernel, and is made available
+ *   under the terms of the GNU General Public License version 2, as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * earlycpio.c
+ *
+ * Find a specific cpio member; must precede any compressed content.
+ * This is used to locate data items in the initramfs used by the
+ * kernel itself during early boot (before the main initramfs is
+ * decompressed.)  It is the responsibility of the initramfs creator
+ * to ensure that these items are uncompressed at the head of the
+ * blob.  Depending on the boot loader or package tool that may be a
+ * separate file or part of the same file.
+ */
+
+#include <linux/earlycpio.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+enum cpio_fields {
+	C_MAGIC,
+	C_INO,
+	C_MODE,
+	C_UID,
+	C_GID,
+	C_NLINK,
+	C_MTIME,
+	C_FILESIZE,
+	C_MAJ,
+	C_MIN,
+	C_RMAJ,
+	C_RMIN,
+	C_NAMESIZE,
+	C_CHKSUM,
+	C_NFIELDS
+};
+
+/**
+ * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * @path:   The directory to search for, including a slash at the end
+ * @data:   Pointer to the the cpio archive or a header inside
+ * @len:    Remaining length of the cpio based on data pointer
+ * @offset: When a matching file is found, this is the offset to the
+ *          beginning of the cpio. It can be used to iterate through
+ *          the cpio to find all files inside of a directory path
+ *
+ * @return: struct cpio_data containing the address, length and
+ *          filename (with the directory path cut off) of the found file.
+ *          If you search for a filename and not for files in a directory,
+ *          pass the absolute path of the filename in the cpio and make sure
+ *          the match returned an empty filename string.
+ */
+
+struct cpio_data __cpuinit find_cpio_data(const char *path, void *data,
+					  size_t len, long *offset)
+{
+	const size_t cpio_header_len = 8*C_NFIELDS - 2;
+	struct cpio_data cd = { NULL, 0, "" };
+	const char *p, *dptr, *nptr;
+	unsigned int ch[C_NFIELDS], *chp, v;
+	unsigned char c, x;
+	size_t mypathsize = strlen(path);
+	int i, j;
+
+	p = data;
+
+	while (len > cpio_header_len) {
+		if (!*p) {
+			/* All cpio headers need to be 4-byte aligned */
+			p += 4;
+			len -= 4;
+			continue;
+		}
+
+		j = 6;		/* The magic field is only 6 characters */
+		chp = ch;
+		for (i = C_NFIELDS; i; i--) {
+			v = 0;
+			while (j--) {
+				v <<= 4;
+				c = *p++;
+
+				x = c - '0';
+				if (x < 10) {
+					v += x;
+					continue;
+				}
+
+				x = (c | 0x20) - 'a';
+				if (x < 6) {
+					v += x + 10;
+					continue;
+				}
+
+				goto quit; /* Invalid hexadecimal */
+			}
+			*chp++ = v;
+			j = 8;	/* All other fields are 8 characters */
+		}
+
+		if ((ch[C_MAGIC] - 0x070701) > 1)
+			goto quit; /* Invalid magic */
+
+		len -= cpio_header_len;
+
+		dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4);
+		nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4);
+
+		if (nptr > p + len || dptr < p || nptr < dptr)
+			goto quit; /* Buffer overrun */
+
+		if ((ch[C_MODE] & 0170000) == 0100000 &&
+		    ch[C_NAMESIZE] >= mypathsize &&
+		    !memcmp(p, path, mypathsize)) {
+			*offset = (long)nptr - (long)data;
+			if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
+				pr_warn(
+				"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
+				p, MAX_CPIO_FILE_NAME);
+			}
+			strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
+
+			cd.data = (void *)dptr;
+			cd.size = ch[C_FILESIZE];
+			return cd; /* Found it! */
+		}
+		len -= (nptr - p);
+		p = nptr;
+	}
+
+quit:
+	return cd;
+}
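The header layout find_cpio_data() walks is the "newc" cpio format (magic 070701, or 070702 with a checksum): a 6-character magic followed by thirteen 8-character ASCII hex fields, which is where the 8*C_NFIELDS - 2 = 110-byte header length comes from. A self-contained userspace sketch of that layout, for illustration only (newc_hdr and hexfield are hypothetical names, not kernel API):

#include <stdio.h>
#include <string.h>

struct newc_hdr {		/* 110 bytes: 6-char magic + 13 * 8 hex chars */
	char magic[6];
	char ino[8], mode[8], uid[8], gid[8], nlink[8], mtime[8];
	char filesize[8], maj[8], min[8], rmaj[8], rmin[8];
	char namesize[8], chksum[8];
};

static unsigned long hexfield(const char *f)
{
	unsigned long v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 4) | (unsigned long)(f[i] <= '9' ? f[i] - '0'
						: (f[i] | 0x20) - 'a' + 10);
	return v;
}

int main(void)
{
	/* a header for an empty regular file named "x" */
	const char blob[] = "070701" "00000001" "000081a4" "00000000"
		"00000000" "00000001" "00000000" "00000000" "00000000"
		"00000000" "00000000" "00000000" "00000002" "00000000" "x";
	const struct newc_hdr *h = (const struct newc_hdr *)blob;

	printf("mode %lo namesize %lu filesize %lu\n",
	       hexfield(h->mode), hexfield(h->namesize),
	       hexfield(h->filesize));	/* mode 100644: a regular file */
	return 0;
}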
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 678ce4f1e124..095ab157a521 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -641,7 +641,14 @@ do { \
 **************  MIPS  *****************
 ***************************************/
 #if defined(__mips__) && W_TYPE_SIZE == 32
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v)			\
+do {						\
+	UDItype __ll = (UDItype)(u) * (v);	\
+	w1 = __ll >> 32;			\
+	w0 = __ll;				\
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
 #define umul_ppmm(w1, w0, u, v) \
 	__asm__ ("multu %2,%3" \
 	: "=l" ((USItype)(w0)), \
@@ -666,7 +673,15 @@ do { \
 **************  MIPS/64  **************
 ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
+#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#define umul_ppmm(w1, w0, u, v) \
+do {									\
+	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
+	__ll_UTItype __ll = (__ll_UTItype)(u) * (v);			\
+	w1 = __ll >> 64;						\
+	w0 = __ll;							\
+} while (0)
+#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
 #define umul_ppmm(w1, w0, u, v) \
 	__asm__ ("dmultu %2,%3" \
 	: "=l" ((UDItype)(w0)), \
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index f114bf6a8e13..196b06984dec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -57,7 +57,7 @@ int swiotlb_force;
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
-static char *io_tlb_start, *io_tlb_end;
+static phys_addr_t io_tlb_start, io_tlb_end;
 
 /*
  * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-static void *io_tlb_overflow_buffer;
+static phys_addr_t io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -125,27 +125,38 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 void swiotlb_print_info(void)
 {
 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-	phys_addr_t pstart, pend;
+	unsigned char *vstart, *vend;
 
-	pstart = virt_to_phys(io_tlb_start);
-	pend = virt_to_phys(io_tlb_end);
+	vstart = phys_to_virt(io_tlb_start);
+	vend = phys_to_virt(io_tlb_end);
 
 	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
-	       (unsigned long long)pstart, (unsigned long long)pend - 1,
-	       bytes >> 20, io_tlb_start, io_tlb_end - 1);
+	       (unsigned long long)io_tlb_start,
+	       (unsigned long long)io_tlb_end,
+	       bytes >> 20, vstart, vend - 1);
 }
 
 void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
+	void *v_overflow_buffer;
 	unsigned long i, bytes;
 
 	bytes = nslabs << IO_TLB_SHIFT;
 
 	io_tlb_nslabs = nslabs;
-	io_tlb_start = tlb;
+	io_tlb_start = __pa(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
+	 * Get the overflow emergency buffer
+	 */
+	v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+	if (!v_overflow_buffer)
+		panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
@@ -156,12 +167,6 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_index = 0;
 	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
-	if (!io_tlb_overflow_buffer)
-		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
 		swiotlb_print_info();
 }
@@ -173,6 +178,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 static void __init
 swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
+	unsigned char *vstart;
 	unsigned long bytes;
 
 	if (!io_tlb_nslabs) {
@@ -185,11 +191,11 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
-	if (!io_tlb_start)
+	vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
+	if (!vstart)
 		panic("Cannot allocate SWIOTLB buffer");
 
-	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+	swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
 }
 
 void __init
195void __init 201void __init
@@ -207,6 +213,7 @@ int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
 	unsigned long bytes, req_nslabs = io_tlb_nslabs;
+	unsigned char *vstart = NULL;
 	unsigned int order;
 	int rc = 0;
 
@@ -223,14 +230,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-							order);
-		if (io_tlb_start)
+		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+						  order);
+		if (vstart)
 			break;
 		order--;
 	}
 
-	if (!io_tlb_start) {
+	if (!vstart) {
 		io_tlb_nslabs = req_nslabs;
 		return -ENOMEM;
 	}
@@ -239,9 +246,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
239 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 246 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
240 io_tlb_nslabs = SLABS_PER_PAGE << order; 247 io_tlb_nslabs = SLABS_PER_PAGE << order;
241 } 248 }
242 rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs); 249 rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
243 if (rc) 250 if (rc)
244 free_pages((unsigned long)io_tlb_start, order); 251 free_pages((unsigned long)vstart, order);
245 return rc; 252 return rc;
246} 253}
247 254
@@ -249,14 +256,25 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
 	unsigned long i, bytes;
+	unsigned char *v_overflow_buffer;
 
 	bytes = nslabs << IO_TLB_SHIFT;
 
 	io_tlb_nslabs = nslabs;
-	io_tlb_start = tlb;
+	io_tlb_start = virt_to_phys(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
-	memset(io_tlb_start, 0, bytes);
+	memset(tlb, 0, bytes);
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+						     get_order(io_tlb_overflow));
+	if (!v_overflow_buffer)
+		goto cleanup2;
+
+	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
 
 	/*
 	 * Allocate and initialize the free list array.  This array is used
@@ -266,7 +284,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
 	                              get_order(io_tlb_nslabs * sizeof(int)));
 	if (!io_tlb_list)
-		goto cleanup2;
+		goto cleanup3;
 
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
@@ -277,18 +295,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	                              get_order(io_tlb_nslabs *
 	                                        sizeof(phys_addr_t)));
 	if (!io_tlb_orig_addr)
-		goto cleanup3;
+		goto cleanup4;
 
 	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
-	/*
-	 * Get the overflow emergency buffer
-	 */
-	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-	                                  get_order(io_tlb_overflow));
-	if (!io_tlb_overflow_buffer)
-		goto cleanup4;
-
 	swiotlb_print_info();
 
 	late_alloc = 1;
@@ -296,42 +306,42 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr,
-		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-	io_tlb_orig_addr = NULL;
-cleanup3:
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 		   sizeof(int)));
 	io_tlb_list = NULL;
+cleanup3:
+	free_pages((unsigned long)v_overflow_buffer,
+		   get_order(io_tlb_overflow));
+	io_tlb_overflow_buffer = 0;
 cleanup2:
-	io_tlb_end = NULL;
-	io_tlb_start = NULL;
+	io_tlb_end = 0;
+	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
 	return -ENOMEM;
 }
 
 void __init swiotlb_free(void)
 {
-	if (!io_tlb_overflow_buffer)
+	if (!io_tlb_orig_addr)
 		return;
 
 	if (late_alloc) {
-		free_pages((unsigned long)io_tlb_overflow_buffer,
+		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
 			   get_order(io_tlb_overflow));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 			   sizeof(int)));
-		free_pages((unsigned long)io_tlb_start,
+		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		free_bootmem_late(__pa(io_tlb_overflow_buffer),
+		free_bootmem_late(io_tlb_overflow_buffer,
 				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
 				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
 				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-		free_bootmem_late(__pa(io_tlb_start),
+		free_bootmem_late(io_tlb_start,
 				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
@@ -339,21 +349,21 @@ void __init swiotlb_free(void)
 
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-	return paddr >= virt_to_phys(io_tlb_start) &&
-		paddr < virt_to_phys(io_tlb_end);
+	return paddr >= io_tlb_start && paddr < io_tlb_end;
 }
 
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-		    enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+			   size_t size, enum dma_data_direction dir)
 {
-	unsigned long pfn = PFN_DOWN(phys);
+	unsigned long pfn = PFN_DOWN(orig_addr);
+	unsigned char *vaddr = phys_to_virt(tlb_addr);
 
 	if (PageHighMem(pfn_to_page(pfn))) {
 		/* The buffer does not have a mapping.  Map it in and copy */
-		unsigned int offset = phys & ~PAGE_MASK;
+		unsigned int offset = orig_addr & ~PAGE_MASK;
 		char *buffer;
 		unsigned int sz = 0;
 		unsigned long flags;
@@ -364,32 +374,31 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 			local_irq_save(flags);
 			buffer = kmap_atomic(pfn_to_page(pfn));
 			if (dir == DMA_TO_DEVICE)
-				memcpy(dma_addr, buffer + offset, sz);
+				memcpy(vaddr, buffer + offset, sz);
 			else
-				memcpy(buffer + offset, dma_addr, sz);
+				memcpy(buffer + offset, vaddr, sz);
 			kunmap_atomic(buffer);
 			local_irq_restore(flags);
 
 			size -= sz;
 			pfn++;
-			dma_addr += sz;
+			vaddr += sz;
 			offset = 0;
 		}
+	} else if (dir == DMA_TO_DEVICE) {
+		memcpy(vaddr, phys_to_virt(orig_addr), size);
 	} else {
-		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, phys_to_virt(phys), size);
-		else
-			memcpy(phys_to_virt(phys), dma_addr, size);
+		memcpy(phys_to_virt(orig_addr), vaddr, size);
 	}
 }
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-			     phys_addr_t phys, size_t size,
-			     enum dma_data_direction dir)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+				   dma_addr_t tbl_dma_addr,
+				   phys_addr_t orig_addr, size_t size,
+				   enum dma_data_direction dir)
 {
 	unsigned long flags;
-	char *dma_addr;
+	phys_addr_t tlb_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
 	unsigned long mask;
@@ -453,7 +462,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 		io_tlb_list[i] = 0;
 	for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
 		io_tlb_list[i] = ++count;
-	dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+	tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 
 	/*
 	 * Update the indices to avoid searching in the next
@@ -471,7 +480,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 
 not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
-	return NULL;
+	return SWIOTLB_MAP_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
@@ -481,11 +490,11 @@ found:
 	 * needed.
 	 */
 	for (i = 0; i < nslots; i++)
-		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
+		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
-	return dma_addr;
+	return tlb_addr;
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
@@ -493,11 +502,10 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir)
+phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+		       enum dma_data_direction dir)
 {
-	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
 }
@@ -505,20 +513,19 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-void
-swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
-			enum dma_data_direction dir)
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+			      size_t size, enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	phys_addr_t phys = io_tlb_orig_addr[index];
+	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -547,26 +554,27 @@ swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-void
-swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
-			enum dma_data_direction dir,
-			enum dma_sync_target target)
+void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+			     size_t size, enum dma_data_direction dir,
+			     enum dma_sync_target target)
 {
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	phys_addr_t phys = io_tlb_orig_addr[index];
+	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
-	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+			swiotlb_bounce(orig_addr, tlb_addr,
+				       size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+			swiotlb_bounce(orig_addr, tlb_addr,
+				       size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
@@ -589,12 +597,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
-		/*
-		 * The allocated memory isn't reachable by the device.
-		 */
-		free_pages((unsigned long) ret, order);
-		ret = NULL;
+	if (ret) {
+		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+		if (dev_addr + size - 1 > dma_mask) {
+			/*
+			 * The allocated memory isn't reachable by the device.
+			 */
+			free_pages((unsigned long) ret, order);
+			ret = NULL;
+		}
 	}
 	if (!ret) {
 		/*
@@ -602,25 +613,29 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
-		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
-		if (!ret)
+		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+		if (paddr == SWIOTLB_MAP_ERROR)
 			return NULL;
-	}
 
-	memset(ret, 0, size);
-	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+		ret = phys_to_virt(paddr);
+		dev_addr = phys_to_dma(hwdev, paddr);
 
-	/* Confirm address can be DMA'd by device */
-	if (dev_addr + size - 1 > dma_mask) {
-		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)dma_mask,
-		       (unsigned long long)dev_addr);
+		/* Confirm address can be DMA'd by device */
+		if (dev_addr + size - 1 > dma_mask) {
+			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+			       (unsigned long long)dma_mask,
+			       (unsigned long long)dev_addr);
 
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
-		return NULL;
+			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+			swiotlb_tbl_unmap_single(hwdev, paddr,
+						 size, DMA_TO_DEVICE);
+			return NULL;
+		}
 	}
+
 	*dma_handle = dev_addr;
+	memset(ret, 0, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
@@ -636,7 +651,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -677,9 +692,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
+	phys_addr_t map, phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
-	void *map;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -690,23 +704,19 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
-	/*
-	 * Oh well, have to allocate and map a bounce buffer.
-	 */
+	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir);
-	if (!map) {
+	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
-		map = io_tlb_overflow_buffer;
+		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = swiotlb_virt_to_bus(dev, map);
+	dev_addr = phys_to_dma(dev, map);
 
-	/*
-	 * Ensure that the address returned is DMA'ble
-	 */
+	/* Ensure that the address returned is DMA'ble */
 	if (!dma_capable(dev, dev_addr, size)) {
 		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+		return phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
 	return dev_addr;
@@ -729,7 +739,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
 		return;
 	}
 
@@ -773,8 +783,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
-					target);
+		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 		return;
 	}
 
@@ -831,9 +840,9 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
-			void *map = map_single(hwdev, sg_phys(sg),
-					       sg->length, dir);
-			if (!map) {
+			phys_addr_t map = map_single(hwdev, sg_phys(sg),
+						     sg->length, dir);
+			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
@@ -842,7 +851,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+			sg->dma_address = phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -925,7 +934,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 
@@ -938,6 +947,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
+	return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
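The thread running through all of the swiotlb hunks is that io_tlb_start, io_tlb_end, and the overflow buffer are now tracked as physical addresses, so range checks and DMA-address derivation no longer round-trip through virt_to_phys(). A minimal standalone sketch of the resulting invariant, assuming an identity phys-to-DMA mapping (dma_addr_of is a hypothetical stand-in; real kernels go through phys_to_dma(dev, ...) which may apply a bus offset):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* bounce pool bounds, kept as physical addresses as in the patched code */
static phys_addr_t io_tlb_start = 0x1000000;
static phys_addr_t io_tlb_end   = 0x1400000;

/* assumption: 1:1 mapping with no IOMMU or bus offset */
static dma_addr_t dma_addr_of(phys_addr_t paddr)
{
	return (dma_addr_t)paddr;
}

/* the simplified range check from the patched is_swiotlb_buffer() */
static bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

int main(void)
{
	phys_addr_t slot = io_tlb_start + (3 << 11);	/* third 2KB slab */

	printf("in pool: %d, dma addr: 0x%llx\n",
	       is_swiotlb_buffer(slot),
	       (unsigned long long)dma_addr_of(slot));
	return 0;
}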