-rw-r--r--  arch/ia64/mm/init.c           19
-rw-r--r--  include/asm-ia64/dma.h         2
-rw-r--r--  include/asm-x86_64/swiotlb.h   7
-rw-r--r--  lib/swiotlb.c                 33
4 files changed, 33 insertions(+), 28 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8b7599808dd5..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -129,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
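
The loop above only marks pages that lie wholly inside [addr, addr + size): PAGE_ALIGN() rounds the start up to the next page boundary, and the loop condition stops before any partial tail page. A standalone sketch of that rounding, using a made-up buffer address and a 16 KB PAGE_SIZE (one common ia64 configuration) - both are assumptions for illustration, not values from the patch:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	16384UL	/* assumed 16 KB page, for illustration */
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x10001000UL;	/* hypothetical buffer start */
	size_t size = 3 * PAGE_SIZE;		/* touches four pages, covers two fully */
	unsigned long pg_addr = PAGE_ALIGN(addr);
	unsigned long end = addr + size;

	/* Same loop shape as dma_mark_clean(): whole pages only. */
	while (pg_addr + PAGE_SIZE <= end) {
		printf("would mark page at %#lx clean\n", pg_addr);
		pg_addr += PAGE_SIZE;
	}
	return 0;
}

Running this marks only the two fully covered pages; the partially covered head and tail pages are left for lazy_mmu_prot_update() to flush if they ever back an executable mapping.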
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index dad3a735df8b..4d97f60f1ef5 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
 
 #define free_dma(x)
 
+void dma_mark_clean(void *addr, size_t size);
+
 #endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ba94ab3d2673..f9c589539a82 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -1,6 +1,5 @@
 #ifndef _ASM_SWIOTLB_H
-#define _ASM_SWTIOLB_H 1
-
+#define _ASM_SWIOTLB_H 1
 
 #include <asm/dma-mapping.h>
 
@@ -52,4 +51,6 @@ extern int swiotlb;
 
 extern void pci_swiotlb_init(void);
 
-#endif /* _ASM_SWTIOLB_H */
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#endif /* _ASM_SWIOTLB_H */
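
On x86_64, DMA is cache-coherent, so this header supplies dma_mark_clean() as an empty inline (and fixes the long-standing _ASM_SWTIOLB_H typo in the include guard); ia64 declares the real implementation in asm-ia64/dma.h. That lets lib/swiotlb.c call the hook unconditionally. A minimal sketch of this stub-or-real hook pattern - the names here are illustrative, not the kernel's:

#include <stddef.h>

#ifdef ARCH_NEEDS_MARK_CLEAN
void arch_mark_clean(void *addr, size_t size);	/* real version lives elsewhere */
#else
static inline void arch_mark_clean(void *addr, size_t size) {}	/* no-op stub */
#endif

void consume_dma_buffer(void *buf, size_t len)
{
	/* Common code calls the hook unconditionally; the stub compiles away. */
	arch_mark_clean(buf, len);
}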
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eefd..34278338aad0 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -558,25 +558,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 }
 
 /*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
-	unsigned long pg_addr, end;
-
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
-}
-
-/*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
  * match what was provided for in a previous swiotlb_map_single call. All
  * other usages are undefined.
@@ -594,7 +575,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 /*
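
The rename hunks here and below all follow one shape: an address inside the io_tlb aperture means the data was bounced and must go through unmap_single()/sync_single() for the copy-back, while anything else was written directly by the device, so for DMA_FROM_DEVICE the pages only need marking clean. A toy model of that dispatch, with stand-in names rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

static char tlb_pool[4096];		/* stand-in for the io_tlb aperture */
static char *io_tlb_start = tlb_pool;
static char *io_tlb_end = tlb_pool + sizeof(tlb_pool);

static void finish_from_device(char *addr, size_t size)
{
	if (addr >= io_tlb_start && addr < io_tlb_end)
		printf("bounced: copy back %zu bytes from %p\n", size, (void *)addr);
	else
		printf("direct: mark %zu bytes at %p clean\n", size, (void *)addr);
}

int main(void)
{
	char direct[64];

	finish_from_device(io_tlb_start + 16, 32);	/* inside the pool */
	finish_from_device(direct, sizeof(direct));	/* outside the pool */
	return 0;
}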
@@ -617,7 +598,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -648,7 +629,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -698,7 +679,6 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
-			sg->dma_address = virt_to_bus(map);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -707,6 +687,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 				sg[0].dma_length = 0;
 				return 0;
 			}
+			sg->dma_address = virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
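
This hunk is a plain bug fix: the old code stored virt_to_bus(map) into sg->dma_address before checking map for NULL, so a failed map_single() still had its NULL return fed through the address translation. Moving the assignment after the check keeps the failure path from ever consuming the bad pointer. A minimal sketch of the corrected ordering, with hypothetical helpers:

#include <stdio.h>
#include <stddef.h>

static char pool[1024];

static void *try_map(size_t size)
{
	return size > sizeof(pool) ? NULL : pool;	/* may fail */
}

int map_one(size_t size, unsigned long *out_bus)
{
	void *map = try_map(size);

	if (!map)
		return -1;			/* report failure to the caller */
	*out_bus = (unsigned long)map;		/* translate only on success */
	return 0;
}

int main(void)
{
	unsigned long bus;

	if (map_one(4096, &bus) < 0)	/* too big: fails cleanly, never translated */
		printf("mapping failed\n");
	if (map_one(64, &bus) == 0)	/* small enough: succeeds, then translates */
		printf("mapped at bus %#lx\n", bus);
	return 0;
}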
@@ -730,7 +711,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 /*
@@ -752,6 +733,8 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			sync_single(hwdev, (void *) sg->dma_address,
 				    sg->dma_length, dir, target);
+		else if (dir == DMA_FROM_DEVICE)
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 void
@@ -783,7 +766,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return (virt_to_phys (io_tlb_end) - 1) <= mask;
+	return virt_to_phys(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
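
The last hunk fixes swiotlb_dma_supported(): io_tlb_end points one byte past the aperture, so the last addressable byte is io_tlb_end - 1. The old expression translated the past-the-end pointer and then subtracted one from the resulting physical address, which is only correct if the next virtual page happens to be physically contiguous with the aperture. A toy translation, made up for illustration, where the two forms disagree:

#include <stdio.h>
#include <stdint.h>

/* Toy mapping: virtual [0x1000, 0x2000) -> physical [0xF000, 0x10000);
 * the next virtual page lives at a distant physical address. */
static uint64_t toy_virt_to_phys(uint64_t v)
{
	if (v < 0x2000)
		return v - 0x1000 + 0xF000;
	return v - 0x2000 + 0x100000000ULL;
}

int main(void)
{
	uint64_t io_tlb_end = 0x2000;	/* one past the last aperture byte */
	uint64_t mask = 0xFFFF;		/* device can address 16 bits */

	/* Old form: translates a pointer outside the aperture first... */
	printf("old: %d\n", (toy_virt_to_phys(io_tlb_end) - 1) <= mask);
	/* ...fixed form: translates the last byte that actually exists. */
	printf("new: %d\n", toy_virt_to_phys(io_tlb_end - 1) <= mask);
	return 0;
}

Here the old test prints 0 and wrongly rejects a device whose mask in fact covers the whole aperture; the fixed test prints 1.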