author     Tony Luck <tony.luck@intel.com>  2007-03-06 16:31:45 -0500
committer  Tony Luck <tony.luck@intel.com>  2007-03-06 16:31:45 -0500
commit     25667d675454f2cd258c5fa798a2281af1ef2ae9 (patch)
tree       9724ea546a5976dc0d9c4de9263b8a2a82254123
parent     c3442e296517aee733d62fc3fe03211598902c7d (diff)
Revert "[IA64] swiotlb abstraction (e.g. for Xen)"
This reverts commit 51099005ab8e09d68a13fea8d55bc739c1040ca6.
-rw-r--r--  include/asm-ia64/swiotlb.h    |   9
-rw-r--r--  include/asm-x86_64/swiotlb.h  |   1
-rw-r--r--  lib/swiotlb.c                 | 184
3 files changed, 35 insertions, 159 deletions
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h
deleted file mode 100644
index 452c162dee4e..000000000000
--- a/include/asm-ia64/swiotlb.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_SWIOTLB_H
-#define _ASM_SWIOTLB_H 1
-
-#include <asm/machvec.h>
-
-#define SWIOTLB_ARCH_NEED_LATE_INIT
-#define SWIOTLB_ARCH_NEED_ALLOC
-
-#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ab913ffcad56..f9c589539a82 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -44,7 +44,6 @@ extern void swiotlb_init(void);
 extern int swiotlb_force;
 
 #ifdef CONFIG_SWIOTLB
-#define SWIOTLB_ARCH_NEED_ALLOC
 extern int swiotlb;
 #else
 #define swiotlb 0
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 623a68af8b18..9970e55c90bd 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,7 +28,6 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/scatterlist.h>
-#include <asm/swiotlb.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -36,10 +35,8 @@
 #define OFFSET(val,align) ((unsigned long) \
         ( (val) & ( (align) - 1)))
 
-#ifndef SG_ENT_VIRT_ADDRESS
 #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
 #define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-#endif
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -104,25 +101,13 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
-typedef char *io_tlb_addr_t;
-#define swiotlb_orig_addr_null(buffer) (!(buffer))
-#define ptr_to_io_tlb_addr(ptr) (ptr)
-#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
-#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
-#endif
-static io_tlb_addr_t *io_tlb_orig_addr;
+static unsigned char **io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
  */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
-#ifdef SWIOTLB_EXTRA_VARIABLES
-SWIOTLB_EXTRA_VARIABLES;
-#endif
-
-#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -137,25 +122,9 @@ setup_io_tlb_npages(char *str)
         swiotlb_force = 1;
         return 1;
 }
-#endif
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-#ifndef swiotlb_adjust_size
-#define swiotlb_adjust_size(size) ((void)0)
-#endif
-
-#ifndef swiotlb_adjust_seg
-#define swiotlb_adjust_seg(start, size) ((void)0)
-#endif
-
-#ifndef swiotlb_print_info
-#define swiotlb_print_info(bytes) \
-        printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
-               "0x%lx\n", bytes >> 20, \
-               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
-#endif
-
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -169,8 +138,6 @@ swiotlb_init_with_default_size(size_t default_size)
                 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
         }
-        swiotlb_adjust_size(io_tlb_nslabs);
-        swiotlb_adjust_size(io_tlb_overflow);
 
         bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
@@ -188,14 +155,10 @@ swiotlb_init_with_default_size(size_t default_size)
          * between io_tlb_start and io_tlb_end.
          */
         io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-        for (i = 0; i < io_tlb_nslabs; i++) {
-                if ( !(i % IO_TLB_SEGSIZE) )
-                        swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
-                                           IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+        for (i = 0; i < io_tlb_nslabs; i++)
                 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-        }
         io_tlb_index = 0;
-        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
+        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
 
         /*
          * Get the overflow emergency buffer
@@ -203,21 +166,17 @@ swiotlb_init_with_default_size(size_t default_size)
         io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
         if (!io_tlb_overflow_buffer)
                 panic("Cannot allocate SWIOTLB overflow buffer!\n");
-        swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
 
-        swiotlb_print_info(bytes);
+        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 }
-#ifndef __swiotlb_init_with_default_size
-#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
-#endif
 
 void __init
 swiotlb_init(void)
 {
-        __swiotlb_init_with_default_size(64 * (1<<20));  /* default to 64MB */
+        swiotlb_init_with_default_size(64 * (1<<20));    /* default to 64MB */
 }
 
-#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
 /*
  * Systems with larger DMA zones (those that don't support ISA) can
  * initialize the swiotlb later using the slab allocator if needed.
@@ -275,12 +234,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
                 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
         io_tlb_index = 0;
 
-        io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
-                                   get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+        io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+                                   get_order(io_tlb_nslabs * sizeof(char *)));
         if (!io_tlb_orig_addr)
                 goto cleanup3;
 
-        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
+        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
 
         /*
          * Get the overflow emergency buffer
@@ -290,17 +249,19 @@ swiotlb_late_init_with_default_size(size_t default_size)
         if (!io_tlb_overflow_buffer)
                 goto cleanup4;
 
-        swiotlb_print_info(bytes);
+        printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+               "0x%lx\n", bytes >> 20,
+               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
 
         return 0;
 
 cleanup4:
-        free_pages((unsigned long)io_tlb_orig_addr,
-                   get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+        free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+                   sizeof(char *)));
         io_tlb_orig_addr = NULL;
 cleanup3:
-        free_pages((unsigned long)io_tlb_list,
-                   get_order(io_tlb_nslabs * sizeof(int)));
+        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                   sizeof(int)));
         io_tlb_list = NULL;
 cleanup2:
         io_tlb_end = NULL;
@@ -310,9 +271,7 @@ cleanup1:
         io_tlb_nslabs = req_nslabs;
         return -ENOMEM;
 }
-#endif
 
-#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
 static int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
@@ -323,35 +282,11 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
         return (addr & ~mask) != 0;
 }
 
-static inline int range_needs_mapping(const void *ptr, size_t size)
-{
-        return swiotlb_force;
-}
-
-static inline int order_needs_mapping(unsigned int order)
-{
-        return 0;
-}
-#endif
-
-static void
-__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
-{
-#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
-        if (dir == DMA_TO_DEVICE)
-                memcpy(dma_addr, buffer, size);
-        else
-                memcpy(buffer, dma_addr, size);
-#else
-        __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
-#endif
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 {
         unsigned long flags;
         char *dma_addr;
@@ -424,7 +359,7 @@ map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
          */
         io_tlb_orig_addr[index] = buffer;
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-                __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+                memcpy(dma_addr, buffer, size);
 
         return dma_addr;
 }
@@ -438,18 +373,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-        io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+        char *buffer = io_tlb_orig_addr[index];
 
         /*
          * First, sync the memory before unmapping the entry
          */
-        if (!swiotlb_orig_addr_null(buffer)
-            && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+        if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                 /*
                  * bounce... copy the data back into the original buffer * and
                  * delete the bounce buffer.
                  */
-                __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+                memcpy(buffer, dma_addr, size);
 
         /*
          * Return the buffer to the free list by setting the corresponding
@@ -482,18 +416,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
             int dir, int target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-        io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+        char *buffer = io_tlb_orig_addr[index];
 
         switch (target) {
         case SYNC_FOR_CPU:
                 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                        __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+                        memcpy(buffer, dma_addr, size);
                 else
                         BUG_ON(dir != DMA_TO_DEVICE);
                 break;
         case SYNC_FOR_DEVICE:
                 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                        __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+                        memcpy(dma_addr, buffer, size);
                 else
                         BUG_ON(dir != DMA_FROM_DEVICE);
                 break;
@@ -502,8 +436,6 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
         }
 }
 
-#ifdef SWIOTLB_ARCH_NEED_ALLOC
-
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flags)
@@ -519,10 +451,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
          */
         flags |= GFP_DMA;
 
-        if (!order_needs_mapping(order))
-                ret = (void *)__get_free_pages(flags, order);
-        else
-                ret = NULL;
+        ret = (void *)__get_free_pages(flags, order);
         if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
                 /*
                  * The allocated memory isn't reachable by the device.
@@ -560,7 +489,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         *dma_handle = dev_addr;
         return ret;
 }
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -573,9 +501,6 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
         swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
 }
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-#endif
 
 static void
 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -617,14 +542,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!range_needs_mapping(ptr, size)
-            && !address_needs_mapping(hwdev, dev_addr))
+        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
                 return dev_addr;
 
         /*
          * Oh well, have to allocate and map a bounce buffer.
         */
         map = map_single(hwdev, ptr, size, dir);
@@ -752,16 +676,17 @@ int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
                int dir)
 {
+        void *addr;
         dma_addr_t dev_addr;
         int i;
 
         BUG_ON(dir == DMA_NONE);
 
         for (i = 0; i < nelems; i++, sg++) {
-                dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-                if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
-                    || address_needs_mapping(hwdev, dev_addr)) {
-                        void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
+                addr = SG_ENT_VIRT_ADDRESS(sg);
+                dev_addr = virt_to_bus(addr);
+                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+                        void *map = map_single(hwdev, addr, sg->length, dir);
                         if (!map) {
                                 /* Don't panic here, we expect map_sg users
                                    to do proper error handling. */
@@ -835,44 +760,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
         swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
-
-dma_addr_t
-swiotlb_map_page(struct device *hwdev, struct page *page,
-                 unsigned long offset, size_t size,
-                 enum dma_data_direction direction)
-{
-        dma_addr_t dev_addr;
-        char *map;
-
-        dev_addr = page_to_bus(page) + offset;
-        if (address_needs_mapping(hwdev, dev_addr)) {
-                map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
-                if (!map) {
-                        swiotlb_full(hwdev, size, direction, 1);
-                        map = io_tlb_overflow_buffer;
-                }
-                dev_addr = virt_to_bus(map);
-        }
-
-        return dev_addr;
-}
-
-void
-swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-                   size_t size, enum dma_data_direction direction)
-{
-        char *dma_addr = bus_to_virt(dev_addr);
-
-        BUG_ON(direction == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                unmap_single(hwdev, dma_addr, size, direction);
-        else if (direction == DMA_FROM_DEVICE)
-                dma_mark_clean(dma_addr, size);
-}
-
-#endif
-
 int
 swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 {
@@ -885,13 +772,10 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
  * during bus mastering, then you would pass 0x00ffffff as the mask to
  * this function.
  */
-#ifndef __swiotlb_dma_supported
-#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
-#endif
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-        return __swiotlb_dma_supported(hwdev, mask);
+        return virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
@@ -906,4 +790,6 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);