Diffstat (limited to 'lib')

 -rw-r--r--  lib/dma-debug.c | 115
 -rw-r--r--  lib/swiotlb.c   | 119

 2 files changed, 167 insertions(+), 67 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 69da09a085a1..8fcc09c91e1b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -85,6 +85,7 @@ static u32 show_num_errors = 1;
 
 static u32 num_free_entries;
 static u32 min_free_entries;
+static u32 nr_total_entries;
 
 /* number of preallocated entries requested by kernel cmdline */
 static u32 req_entries;
@@ -185,15 +186,50 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
                                                 struct dma_debug_entry *ref)
 {
-        struct dma_debug_entry *entry;
+        struct dma_debug_entry *entry, *ret = NULL;
+        int matches = 0, match_lvl, last_lvl = 0;
 
         list_for_each_entry(entry, &bucket->list, list) {
-                if ((entry->dev_addr == ref->dev_addr) &&
-                    (entry->dev == ref->dev))
+                if ((entry->dev_addr != ref->dev_addr) ||
+                    (entry->dev != ref->dev))
+                        continue;
+
+                /*
+                 * Some drivers map the same physical address multiple
+                 * times. Without a hardware IOMMU this results in the
+                 * same device addresses being put into the dma-debug
+                 * hash multiple times too. This can result in false
+                 * positives being reported. Therefore we implement a
+                 * best-fit algorithm here which returns the entry from
+                 * the hash which fits best to the reference value
+                 * instead of the first-fit.
+                 */
+                matches += 1;
+                match_lvl = 0;
+                entry->size      == ref->size      ? ++match_lvl : match_lvl;
+                entry->type      == ref->type      ? ++match_lvl : match_lvl;
+                entry->direction == ref->direction ? ++match_lvl : match_lvl;
+
+                if (match_lvl == 3) {
+                        /* perfect-fit - return the result */
                         return entry;
+                } else if (match_lvl > last_lvl) {
+                        /*
+                         * We found an entry that fits better than the
+                         * previous one
+                         */
+                        last_lvl = match_lvl;
+                        ret      = entry;
+                }
         }
 
-        return NULL;
+        /*
+         * If we have multiple matches but no perfect-fit, just return
+         * NULL.
+         */
+        ret = (matches == 1) ? ret : NULL;
+
+        return ret;
 }
 
 /*
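The best-fit lookup above is easiest to see on a concrete example. Below is a minimal user-space sketch of the same idea; the struct, values, and match_lvl() helper are hypothetical stand-ins for illustration, not the kernel's dma_debug_entry, and it omits the "multiple imperfect matches return NULL" rule that the patch enforces via the matches counter.

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct dma_debug_entry. */
struct entry {
        int size, type, direction;
};

/* How many of the three secondary fields agree with the reference. */
static int match_lvl(const struct entry *e, const struct entry *ref)
{
        int lvl = 0;

        if (e->size == ref->size)
                lvl++;
        if (e->type == ref->type)
                lvl++;
        if (e->direction == ref->direction)
                lvl++;

        return lvl;
}

int main(void)
{
        /* Two entries sharing one device address, as created by a driver
         * that maps the same physical page twice. */
        struct entry bucket[2] = { { 4096, 0, 1 }, { 64, 1, 2 } };
        struct entry ref = { 64, 1, 2 };
        int i, best = -1, last_lvl = 0;

        for (i = 0; i < 2; i++) {
                int lvl = match_lvl(&bucket[i], &ref);

                if (lvl == 3) {         /* perfect fit */
                        best = i;
                        break;
                } else if (lvl > last_lvl) {
                        last_lvl = lvl;
                        best = i;
                }
        }

        /* First-fit would have picked entry 0; best-fit picks entry 1. */
        printf("best-fit entry: %d\n", best);
        return 0;
}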
@@ -257,6 +293,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
         put_hash_bucket(bucket, &flags);
 }
 
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+        struct dma_debug_entry *entry;
+
+        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+        list_del(&entry->list);
+        memset(entry, 0, sizeof(*entry));
+
+        num_free_entries -= 1;
+        if (num_free_entries < min_free_entries)
+                min_free_entries = num_free_entries;
+
+        return entry;
+}
+
 /* struct dma_entry allocator
  *
  * The next two functions implement the allocator for
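The new __dma_entry_alloc() helper is a plain intrusive free-list pop: take the first node, unlink it, and zero the containing object for reuse. A self-contained user-space sketch of that pattern follows; the miniature list_head and container_of only mimic the kernel's <linux/list.h> primitives.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Miniature stand-ins for the kernel's <linux/list.h>. */
struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

struct entry {
        struct list_head list;
        int payload;
};

int main(void)
{
        struct list_head free_entries = LIST_HEAD_INIT(free_entries);
        struct entry e1 = { .payload = 42 };

        list_add(&e1.list, &free_entries);

        /* The pop performed by __dma_entry_alloc(): first node off the
         * list, unlink, zero the containing object for reuse. */
        struct entry *e = container_of(free_entries.next, struct entry, list);
        list_del(&e->list);
        memset(e, 0, sizeof(*e));

        printf("recycled entry payload: %d\n", e->payload); /* prints 0 */
        return 0;
}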
@@ -276,9 +327,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
                 goto out;
         }
 
-        entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-        list_del(&entry->list);
-        memset(entry, 0, sizeof(*entry));
+        entry = __dma_entry_alloc();
 
 #ifdef CONFIG_STACKTRACE
         entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +335,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
         entry->stacktrace.skip = 2;
         save_stack_trace(&entry->stacktrace);
 #endif
-        num_free_entries -= 1;
-        if (num_free_entries < min_free_entries)
-                min_free_entries = num_free_entries;
 
 out:
         spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +356,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
         spin_unlock_irqrestore(&free_entries_lock, flags);
 }
 
+int dma_debug_resize_entries(u32 num_entries)
+{
+        int i, delta, ret = 0;
+        unsigned long flags;
+        struct dma_debug_entry *entry;
+        LIST_HEAD(tmp);
+
+        spin_lock_irqsave(&free_entries_lock, flags);
+
+        if (nr_total_entries < num_entries) {
+                delta = num_entries - nr_total_entries;
+
+                spin_unlock_irqrestore(&free_entries_lock, flags);
+
+                for (i = 0; i < delta; i++) {
+                        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+                        if (!entry)
+                                break;
+
+                        list_add_tail(&entry->list, &tmp);
+                }
+
+                spin_lock_irqsave(&free_entries_lock, flags);
+
+                list_splice(&tmp, &free_entries);
+                nr_total_entries += i;
+                num_free_entries += i;
+        } else {
+                delta = nr_total_entries - num_entries;
+
+                for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+                        entry = __dma_entry_alloc();
+                        kfree(entry);
+                }
+
+                nr_total_entries -= i;
+        }
+
+        if (nr_total_entries != num_entries)
+                ret = 1;
+
+        spin_unlock_irqrestore(&free_entries_lock, flags);
+
+        return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
+
 /*
  * DMA-API debugging init code
  *
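Note the locking pattern in dma_debug_resize_entries(): free_entries_lock is dropped around the kzalloc(GFP_KERNEL) loop because GFP_KERNEL allocations may sleep, which is not allowed while holding a spinlock with interrupts disabled; new entries are staged on a private list and spliced in under the lock. A hypothetical caller might look like the sketch below; the function name and sizing policy are invented for illustration.

/* Hypothetical caller: grow the dma-debug pool before bringing up a
 * device expected to keep many mappings in flight.  Returns 0 on
 * success; dma_debug_resize_entries() returns nonzero when the pool
 * could not be resized to exactly the requested size. */
static int example_setup_dma_debug_pool(u32 wanted_entries)
{
        if (dma_debug_resize_entries(wanted_entries)) {
                printk(KERN_WARNING
                       "dma-debug: could not resize pool to %u entries\n",
                       wanted_entries);
                return -ENOMEM;
        }

        return 0;
}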
@@ -439,6 +532,8 @@ void dma_debug_init(u32 num_entries)
                 return;
         }
 
+        nr_total_entries = num_free_entries;
+
         printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
 }
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7d2ced..bffe6d7ef9d9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in unmap_single and
+ * sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
         return paddr;
 }
 
-phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
 {
         return baddr;
 }
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
         return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
-static void *swiotlb_bus_to_virt(dma_addr_t address)
+void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
 {
-        return phys_to_virt(swiotlb_bus_to_phys(address));
+        return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
+}
+
+int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
+                                              dma_addr_t addr, size_t size)
+{
+        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
 }
 
 int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
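The __weak definitions above are link-time hooks: an architecture that provides a strong symbol with the same signature replaces the generic default. A sketch of such an override follows, assuming a hypothetical example_arch_dma_capable() helper.

/* Hypothetical arch override: a non-__weak definition with the same
 * signature wins at link time over the generic default above. */
int swiotlb_arch_address_needs_mapping(struct device *hwdev,
                                       dma_addr_t addr, size_t size)
{
        /* Bounce whenever the arch-specific translation cannot reach
         * the buffer, instead of using the generic DMA-mask check. */
        return !example_arch_dma_capable(hwdev, addr, size);
}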
@@ -309,10 +315,10 @@ cleanup1:
         return -ENOMEM;
 }
 
-static int
+static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+        return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
 }
 
 static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
         unsigned long flags;
 
         while (size) {
-                sz = min(PAGE_SIZE - offset, size);
+                sz = min_t(size_t, PAGE_SIZE - offset, size);
 
                 local_irq_save(flags);
                 buffer = kmap_atomic(pfn_to_page(pfn),
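The min() to min_t() change is a type fix: PAGE_SIZE - offset is unsigned long while size is size_t (unsigned int on some 32-bit architectures), and the kernel's min() deliberately refuses mixed operand types. The definitions in <linux/kernel.h> of this era are approximately as follows.

#define min(x, y) ({                            \
        typeof(x) _min1 = (x);                  \
        typeof(y) _min2 = (y);                  \
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y) ({                    \
        type __min1 = (x);                      \
        type __min2 = (y);                      \
        __min1 < __min2 ? __min1 : __min2; })

The dummy pointer comparison in min() triggers a compiler warning as soon as the two operands have different types; min_t(size_t, ...) casts both sides to size_t first.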
@@ -476,7 +482,7 @@ found:
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                                size)) {
                 /*
                  * The allocated memory isn't reachable by the device.
-                 * Fall back on swiotlb_map_single().
                  */
                 free_pages((unsigned long) ret, order);
                 ret = NULL;
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         if (!ret) {
                 /*
                  * We are either out of memory or the device can't DMA
-                 * to GFP_DMA memory; fall back on
-                 * swiotlb_map_single(), which will grab memory from
-                 * the lowest available address range.
+                 * to GFP_DMA memory; fall back on map_single(), which
+                 * will grab memory from the lowest available address range.
                  */
                 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
                 if (!ret)
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        (unsigned long long)dev_addr);
 
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                 return NULL;
         }
         *dma_handle = dev_addr;
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                 free_pages((unsigned long) vaddr, get_order(size));
         else
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                             unsigned long offset, size_t size,
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                             struct dma_attrs *attrs)
 {
         phys_addr_t phys = page_to_phys(page) + offset;
-        void *ptr = page_address(page) + offset;
         dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
         void *map;
 
         BUG_ON(dir == DMA_NONE);
         /*
-         * If the pointer passed in happens to be in the device's DMA window,
+         * If the address happens to be in the device's DMA window,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
         if (!address_needs_mapping(dev, dev_addr, size) &&
-            !range_needs_mapping(virt_to_phys(ptr), size))
+            !range_needs_mapping(phys, size))
                 return dev_addr;
 
         /*
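Dropping the ptr computation does more than remove a round trip through virt_to_phys(): page_address(page) can return NULL for a highmem page with no kernel mapping, so the old range check was only safe for lowmem pages. A side-by-side sketch of the two derivations (fragments, not a complete function):

/* Old: recover the physical address from a kernel virtual pointer;
 * undefined when page_address(page) is NULL (highmem). */
range_needs_mapping(virt_to_phys(page_address(page) + offset), size);

/* New: reuse the physical address already computed from the
 * struct page, which is valid whether or not the page is mapped. */
range_needs_mapping(page_to_phys(page) + offset, size);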
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call. All
+ * match what was provided for in a previous swiotlb_map_page call. All
  * other usages are undefined.
  *
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+                         size_t size, int dir)
+{
+        char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+
+        BUG_ON(dir == DMA_NONE);
+
+        if (is_swiotlb_buffer(dma_addr)) {
+                do_unmap_single(hwdev, dma_addr, size, dir);
+                return;
+        }
+
+        if (dir != DMA_FROM_DEVICE)
+                return;
+
+        dma_mark_clean(dma_addr, size);
+}
+
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
 {
-        char *dma_addr = swiotlb_bus_to_virt(dev_addr);
-
-        BUG_ON(dir == DMA_NONE);
-        if (is_swiotlb_buffer(dma_addr))
-                unmap_single(hwdev, dma_addr, size, dir);
-        else if (dir == DMA_FROM_DEVICE)
-                dma_mark_clean(dma_addr, size);
+        unmap_single(hwdev, dev_addr, size, dir);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
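With the dma_addr_t-based unmap_single() in place, every unmap path funnels through one helper that does the bus-to-virtual translation and the bounce-buffer check itself, which is what lets the scatter-gather loop further down shrink to a single call per element. Conceptually:

    swiotlb_unmap_page()       ->  unmap_single(hwdev, dev_addr, ...)
    swiotlb_unmap_sg_attrs()   ->  unmap_single() once per sg entry
    unmap_single()             ->  do_unmap_single() only when the address
                                   lies inside the swiotlb bounce pool;
                                   otherwise dma_mark_clean() as needed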
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
  *
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
  * using the cpu, yet do not wish to teardown the dma mapping, you must
  * call this function before doing so. At the next point you give the dma
  * address back to the card, you must first perform a
@@ -713,13 +728,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                     size_t size, int dir, int target)
 {
-        char *dma_addr = swiotlb_bus_to_virt(dev_addr);
+        char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
-        if (is_swiotlb_buffer(dma_addr))
+
+        if (is_swiotlb_buffer(dma_addr)) {
                 sync_single(hwdev, dma_addr, size, dir, target);
-        else if (dir == DMA_FROM_DEVICE)
-                dma_mark_clean(dma_addr, size);
+                return;
+        }
+
+        if (dir != DMA_FROM_DEVICE)
+                return;
+
+        dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                           unsigned long offset, size_t size,
                           int dir, int target)
 {
-        char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
-
-        BUG_ON(dir == DMA_NONE);
-        if (is_swiotlb_buffer(dma_addr))
-                sync_single(hwdev, dma_addr, size, dir, target);
-        else if (dir == DMA_FROM_DEVICE)
-                dma_mark_clean(dma_addr, size);
+        swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
 }
 
 void
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
+ * This is the scatter-gather version of the above swiotlb_map_page
  * interface.  Here the scatter gather list elements are each tagged with the
  * appropriate dma address and length.  They are obtained via
  * sg_dma_{address,length}(SG).
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
  * The routine returns the number of addr/length pairs actually
  * used, at most nents.
  *
- * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
  * same here.
  */
 int
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
         BUG_ON(dir == DMA_NONE);
 
-        for_each_sg(sgl, sg, nelems, i) {
-                if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
-                        unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
-                                     sg->dma_length, dir);
-                else if (dir == DMA_FROM_DEVICE)
-                        dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
-        }
+        for_each_sg(sgl, sg, nelems, i)
+                unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
         struct scatterlist *sg;
         int i;
 
-        BUG_ON(dir == DMA_NONE);
-
-        for_each_sg(sgl, sg, nelems, i) {
-                if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
-                        sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
+        for_each_sg(sgl, sg, nelems, i)
+                swiotlb_sync_single(hwdev, sg->dma_address,
                                     sg->dma_length, dir, target);
-                else if (dir == DMA_FROM_DEVICE)
-                        dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
-        }
 }
 
 void