Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug   |  32
-rw-r--r--  lib/debugobjects.c  |   4
-rw-r--r--  lib/swiotlb.c       | 255
3 files changed, 223 insertions(+), 68 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f239e443bc..2e75478e9c69 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
 	  timer routines to track the life time of timer objects and
 	  validate the timer operations.
 
+config DEBUG_OBJECTS_ENABLE_DEFAULT
+	int "debug_objects bootup default value (0-1)"
+	range 0 1
+	default "1"
+	depends on DEBUG_OBJECTS
+	help
+	  Debug objects boot parameter default value
+
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
@@ -545,6 +553,16 @@ config DEBUG_SG
 
 	  If unsure, say N.
 
+config DEBUG_NOTIFIERS
+	bool "Debug notifier call chains"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on sanity checking for notifier call chains.
+	  This is most useful for kernel developers to make sure that
+	  modules properly unregister themselves from notifier chains.
+	  This is a relatively cheap check but if you care about maximum
+	  performance, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR
 
 	  Say N if you are unsure.
 
+config RCU_CPU_STALL_DETECTOR
+	bool "Check for stalled CPUs delaying RCU grace periods"
+	depends on CLASSIC_RCU || TREE_RCU
+	default n
+	help
+	  This option causes RCU to printk information on which
+	  CPUs are delaying the current grace period, but only when
+	  the grace period extends for excessive time periods.
+
+	  Say Y if you want RCU to perform such checks.
+
+	  Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -699,6 +730,7 @@ config FAULT_INJECTION
 config FAILSLAB
 	bool "Fault-injection capability for kmalloc"
 	depends on FAULT_INJECTION
+	depends on SLAB || SLUB
 	help
 	  Provide fault-injection capability for kmalloc.
 
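The DEBUG_NOTIFIERS help text above is about modules that forget to remove their callbacks from notifier chains. As a rough illustration only (not part of this patch), the register/unregister pairing the check polices looks like the following; the reboot chain and all names here are just an example:

	/* Illustrative only -- not from this patch.  A module that registers a
	 * notifier must unregister it on exit; DEBUG_NOTIFIERS helps catch the
	 * case where a chain still points into unloaded module text.
	 */
	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/reboot.h>

	static int example_reboot_event(struct notifier_block *nb,
					unsigned long action, void *data)
	{
		/* react to the reboot notification; NOTIFY_DONE = "not interested" */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_reboot_nb = {
		.notifier_call = example_reboot_event,
	};

	static int __init example_init(void)
	{
		return register_reboot_notifier(&example_reboot_nb);
	}

	static void __exit example_exit(void)
	{
		/* forgetting this call is the bug the debug option is meant to flag */
		unregister_reboot_notifier(&example_reboot_nb);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

If the exit path is dropped, the chain keeps a pointer into freed module text, which is exactly the situation the sanity check warns about.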
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e3ab374e1334..5d99be1fd988 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache;
 static int debug_objects_maxchain __read_mostly;
 static int debug_objects_fixups __read_mostly;
 static int debug_objects_warnings __read_mostly;
-static int debug_objects_enabled __read_mostly;
+static int debug_objects_enabled __read_mostly
+				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+
 static struct debug_obj_descr *descr_test __read_mostly;
 
 static int __init enable_object_debug(char *str)
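The debugobjects.c hunk above only changes the compile-time default; the boot command line can still flip it. A minimal sketch of how the two interact, assuming an early_param handler along the lines of the existing enable_object_debug() (whose body is outside this hunk, so the parser below is an assumption; only the initializer is taken from the diff):

	/* Sketch: Kconfig supplies the default, the boot parameter overrides it. */
	#include <linux/init.h>
	#include <linux/cache.h>

	static int debug_objects_enabled __read_mostly
					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

	static int __init enable_object_debug(char *str)
	{
		debug_objects_enabled = 1;	/* "debug_objects" on the command line wins */
		return 0;
	}
	early_param("debug_objects", enable_object_debug);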
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5f6c629a924d..fa2dc4e5f9ba 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -21,9 +21,12 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/swiotlb.h>
 #include <linux/string.h>
+#include <linux/swiotlb.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
+#include <linux/highmem.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -36,22 +39,6 @@
 #define OFFSET(val,align) ((unsigned long)	\
 	                  ( (val) & ( (align) - 1)))
 
-#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-
-/*
- * Maximum allowable number of contiguous slabs to map,
- * must be a power of 2.  What is the appropriate value ?
- * The complexity of {map,unmap}_single is linearly dependent on this value.
- */
-#define IO_TLB_SEGSIZE	128
-
-/*
- * log of the size of each IO TLB slab.  The number of slabs is command line
- * controllable.
- */
-#define IO_TLB_SHIFT 11
-
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 
 /*
@@ -102,7 +89,10 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-static unsigned char **io_tlb_orig_addr;
+static struct swiotlb_phys_addr {
+	struct page *page;
+	unsigned int offset;
+} *io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -126,6 +116,72 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
+void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+	return alloc_bootmem_low_pages(size);
+}
+
+void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
+dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+	return paddr;
+}
+
+phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+	return baddr;
+}
+
+static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+{
+	return swiotlb_phys_to_bus(virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(dma_addr_t address)
+{
+	return phys_to_virt(swiotlb_bus_to_phys(address));
+}
+
+int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+{
+	return 0;
+}
+
+static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
+{
+	return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
+}
+
+static void swiotlb_print_info(unsigned long bytes)
+{
+	phys_addr_t pstart, pend;
+	dma_addr_t bstart, bend;
+
+	pstart = virt_to_phys(io_tlb_start);
+	pend = virt_to_phys(io_tlb_end);
+
+	bstart = swiotlb_phys_to_bus(pstart);
+	bend = swiotlb_phys_to_bus(pend);
+
+	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
+	       bytes >> 20, io_tlb_start, io_tlb_end);
+	if (pstart != bstart || pend != bend)
+		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
+		       " bus %#llx - %#llx\n",
+		       (unsigned long long)pstart,
+		       (unsigned long long)pend,
+		       (unsigned long long)bstart,
+		       (unsigned long long)bend);
+	else
+		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
+		       (unsigned long long)pstart,
+		       (unsigned long long)pend);
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
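The __weak functions added above are override points: an architecture whose device-visible (bus) addresses differ from CPU physical addresses, or which must carve its bounce pool out of a special region, can supply non-weak replacements while every other platform keeps the identity behaviour. A hypothetical override, purely for illustration (EXAMPLE_BUS_OFFSET and the fixed-offset translation are invented, not taken from any real port):

	#include <linux/types.h>

	/* Invented example: pretend bus addresses are CPU physical addresses plus
	 * a fixed offset.  A non-weak definition like this overrides the weak one.
	 */
	#define EXAMPLE_BUS_OFFSET	0x80000000UL

	dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
	{
		return paddr + EXAMPLE_BUS_OFFSET;
	}

	phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
	{
		return baddr - EXAMPLE_BUS_OFFSET;
	}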
@@ -145,7 +201,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + bytes;
@@ -159,7 +215,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -168,8 +224,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 
-	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+	swiotlb_print_info(bytes);
 }
 
 void __init
@@ -202,8 +257,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-							order);
+		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
 		if (io_tlb_start)
 			break;
 		order--;
@@ -235,12 +289,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 
-	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
-				get_order(io_tlb_nslabs * sizeof(char *)));
+	io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
+				get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
 	if (!io_tlb_orig_addr)
 		goto cleanup3;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -250,9 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	if (!io_tlb_overflow_buffer)
 		goto cleanup4;
 
-	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
-	       "0x%lx\n", bytes >> 20,
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+	swiotlb_print_info(bytes);
 
 	return 0;
 
@@ -279,16 +331,69 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
 }
 
+static inline int range_needs_mapping(void *ptr, size_t size)
+{
+	return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
+}
+
 static int is_swiotlb_buffer(char *addr)
 {
 	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
+static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
+{
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
+	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
+	buffer.page += buffer.offset >> PAGE_SHIFT;
+	buffer.offset &= PAGE_SIZE - 1;
+	return buffer;
+}
+
+static void
+__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
+{
+	if (PageHighMem(buffer.page)) {
+		size_t len, bytes;
+		char *dev, *host, *kmp;
+
+		len = size;
+		while (len != 0) {
+			unsigned long flags;
+
+			bytes = len;
+			if ((bytes + buffer.offset) > PAGE_SIZE)
+				bytes = PAGE_SIZE - buffer.offset;
+			local_irq_save(flags); /* protects KM_BOUNCE_READ */
+			kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
+			dev  = dma_addr + size - len;
+			host = kmp + buffer.offset;
+			if (dir == DMA_FROM_DEVICE)
+				memcpy(host, dev, bytes);
+			else
+				memcpy(dev, host, bytes);
+			kunmap_atomic(kmp, KM_BOUNCE_READ);
+			local_irq_restore(flags);
+			len -= bytes;
+			buffer.page++;
+			buffer.offset = 0;
+		}
+	} else {
+		void *v = page_address(buffer.page) + buffer.offset;
+
+		if (dir == DMA_TO_DEVICE)
+			memcpy(dma_addr, v, size);
+		else
+			memcpy(v, dma_addr, size);
+	}
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
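swiotlb_bus_to_phys_addr() above recovers the (page, offset) pair saved for a slot and then re-normalizes it so that offset always stays below PAGE_SIZE; the carry into .page is what lets __sync_single() kmap one highmem page at a time. A standalone worked example of that normalization with made-up numbers (plain userspace C, 4 KB pages assumed):

	/* Worked example of the (page, offset) normalization.  Values are
	 * hypothetical; IO_TLB_SHIFT = 11 matches the code, PAGE_SHIFT = 12
	 * assumes a 4 KB-page configuration.
	 */
	#include <stdio.h>

	#define IO_TLB_SHIFT	11			/* 2 KB bounce slabs */
	#define PAGE_SHIFT	12			/* 4 KB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		/* pretend the slot was recorded as page #7, offset 3200 bytes,
		 * and the bounce address points 1536 bytes into the slab */
		unsigned long page = 7, offset = 3200, intra_slab = 1536;

		offset += intra_slab;			/* 4736: now past the page end */
		page   += offset >> PAGE_SHIFT;		/* carry whole pages: page = 8 */
		offset &= PAGE_SIZE - 1;		/* remainder within page: 640  */

		printf("page %lu offset %lu\n", page, offset);
		return 0;
	}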
@@ -298,11 +403,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
+	struct swiotlb_phys_addr slot_buf;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+
+	/*
+	 * Carefully handle integer overflow which can occur when mask == ~0UL.
+	 */
 	max_slots = mask + 1
 		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
 		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
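The new comment documents why max_slots is not simply ALIGN(mask + 1, ...): when the segment boundary mask is ~0UL, mask + 1 wraps to zero, so the code falls back to the largest representable slot count instead. A small standalone sketch of both branches (the masks below are arbitrary examples):

	/* Sketch of the max_slots overflow handling, with hypothetical masks. */
	#include <stdio.h>

	#define IO_TLB_SHIFT	11
	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	static unsigned long max_slots(unsigned long mask)
	{
		return mask + 1
			? ALIGN(mask + 1, 1UL << IO_TLB_SHIFT) >> IO_TLB_SHIFT
			: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	}

	int main(void)
	{
		printf("%lu\n", max_slots(0xffffffUL));	/* 16 MB boundary: 8192 slots */
		printf("%lu\n", max_slots(~0UL));	/* mask + 1 wraps to 0: fallback */
		return 0;
	}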
@@ -378,10 +488,15 @@ found:
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
-	for (i = 0; i < nslots; i++)
-		io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
+	slot_buf = buffer;
+	for (i = 0; i < nslots; i++) {
+		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
+		slot_buf.offset &= PAGE_SIZE - 1;
+		io_tlb_orig_addr[index+i] = slot_buf;
+		slot_buf.offset += 1 << IO_TLB_SHIFT;
+	}
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		memcpy(dma_addr, buffer, size);
+		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 
 	return dma_addr;
 }
@@ -395,17 +510,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	char *buffer = io_tlb_orig_addr[index];
+	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
 		/*
 		 * bounce... copy the data back into the original buffer * and
 		 * delete the bounce buffer.
 		 */
-		memcpy(buffer, dma_addr, size);
+		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -437,21 +552,18 @@ static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	    int dir, int target)
 {
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	char *buffer = io_tlb_orig_addr[index];
-
-	buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			memcpy(buffer, dma_addr, size);
+			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			memcpy(dma_addr, buffer, size);
+			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
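sync_single() above is what ultimately services a driver's dma_sync_single_for_cpu()/dma_sync_single_for_device() calls when the mapping went through a bounce buffer; the direction decides which way __sync_single() copies. A hedged driver-side sketch (the device, buffer and function names are invented):

	/* Driver-side view, for illustration; "example_rx", "buf" etc. are made up.
	 * On a bounce-buffered mapping these calls end up in sync_single() above.
	 */
	#include <linux/dma-mapping.h>

	static void example_rx(struct device *dev, void *buf, size_t len,
			       dma_addr_t handle)
	{
		/* device has written into the buffer; pull the data back to the CPU */
		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

		/* ... CPU inspects buf here ... */

		/* hand the buffer back to the device for the next transfer */
		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	}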
@@ -473,7 +585,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -488,13 +600,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+		struct swiotlb_phys_addr buffer;
+		buffer.page = virt_to_page(NULL);
+		buffer.offset = 0;
+		ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
 		if (!ret)
 			return NULL;
 	}
 
 	memset(ret, 0, size);
-	dev_addr = virt_to_bus(ret);
+	dev_addr = swiotlb_virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -554,8 +669,9 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 			 int dir, struct dma_attrs *attrs)
 {
-	dma_addr_t dev_addr = virt_to_bus(ptr);
+	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
 	void *map;
+	struct swiotlb_phys_addr buffer;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -563,19 +679,22 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
+	if (!address_needs_mapping(hwdev, dev_addr, size) &&
+	    !range_needs_mapping(ptr, size))
 		return dev_addr;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	map = map_single(hwdev, ptr, size, dir);
+	buffer.page   = virt_to_page(ptr);
+	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
+	map = map_single(hwdev, buffer, size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = virt_to_bus(map);
+	dev_addr = swiotlb_virt_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
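The reworked swiotlb_map_single_attrs() first decides whether bouncing is needed at all (address_needs_mapping() plus the new per-range hook) and only then builds the page/offset descriptor for map_single(). For orientation, a sketch of the driver-side path that can land here via dma_map_single(); the names are invented:

	/* Illustration of a caller reaching swiotlb_map_single_attrs(); the device
	 * pointer and buffer are invented for the example.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int example_start_tx(struct device *dev, void *data, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;	/* swiotlb was full; overflow buffer returned */

		/* ... program the device with "handle", later dma_unmap_single() ... */
		return 0;
	}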
@@ -605,7 +724,7 @@ void
 swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 			   size_t size, int dir, struct dma_attrs *attrs)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -635,7 +754,7 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -666,7 +785,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = bus_to_virt(dev_addr) + offset;
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -714,18 +833,20 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
-	void *addr;
+	struct swiotlb_phys_addr buffer;
 	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_bus(addr);
-		if (swiotlb_force ||
+		dev_addr = swiotlb_sg_to_bus(sg);
+		if (range_needs_mapping(sg_virt(sg), sg->length) ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
-			void *map = map_single(hwdev, addr, sg->length, dir);
+			void *map;
+			buffer.page   = sg_page(sg);
+			buffer.offset = sg->offset;
+			map = map_single(hwdev, buffer, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -735,7 +856,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = virt_to_bus(map);
+			sg->dma_address = swiotlb_virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
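In the scatterlist branch above each entry is now described by its (page, offset) pair rather than a kernel virtual address, which is what allows highmem pages to be bounced. A caller-side sketch over a two-entry table (all names are invented; the failure handling follows the comment above about map_sg users):

	/* Caller-side sketch for the scatterlist path; the two pages and the
	 * device pointer are invented.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/mm.h>
	#include <linux/errno.h>

	static int example_map_two_pages(struct device *dev,
					 struct page *p0, struct page *p1)
	{
		struct scatterlist sgl[2];
		int nents;

		sg_init_table(sgl, 2);
		sg_set_page(&sgl[0], p0, PAGE_SIZE, 0);
		sg_set_page(&sgl[1], p1, PAGE_SIZE, 0);

		nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
		if (!nents)
			return -ENOMEM;	/* map_sg callers must handle failure */

		/* ... use sg_dma_address(&sgl[i]) / sg_dma_len(&sgl[i]) ... */

		dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
		return 0;
	}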
@@ -765,11 +886,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+		if (sg->dma_address != swiotlb_sg_to_bus(sg))
-			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
 	}
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -798,11 +919,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+		if (sg->dma_address != swiotlb_sg_to_bus(sg))
-			sync_single(hwdev, bus_to_virt(sg->dma_address),
+			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
 	}
 }
 
@@ -823,7 +944,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*
@@ -835,7 +956,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return virt_to_bus(io_tlb_end - 1) <= mask;
+	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_map_single);
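swiotlb_dma_supported() now phrases the test in bus addresses: a DMA mask is acceptable only if the last byte of the bounce pool is reachable through it. A tiny worked example of the same check with made-up numbers:

	/* Worked example of the swiotlb_dma_supported() test; the pool end and
	 * masks are hypothetical.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long io_tlb_end_bus = 0x04000000ULL;	/* pool ends at 64 MB */
		unsigned long long mask24 = 0x00ffffffULL;		/* 24-bit DMA mask */
		unsigned long long mask32 = 0xffffffffULL;		/* 32-bit DMA mask */

		/* the last valid byte is end - 1, mirroring io_tlb_end - 1 */
		printf("24-bit mask ok: %d\n", (io_tlb_end_bus - 1) <= mask24);	/* 0 */
		printf("32-bit mask ok: %d\n", (io_tlb_end_bus - 1) <= mask32);	/* 1 */
		return 0;
	}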