author     Jan Beulich <jbeulich@novell.com>    2007-02-05 21:46:40 -0500
committer  Tony Luck <tony.luck@intel.com>      2007-02-05 21:46:40 -0500
commit     cde14bbfb3aa79b479db35bd29e6c083513d8614 (patch)
tree       68b2d66d1eee3067051f4a6e4df8ace461bf440f /arch/ia64/mm
parent     86afa9eb88af2248bcc91d5b3568c63fdea65d6c (diff)
[IA64] swiotlb bug fixes
This patch fixes:

- marking the I-cache clean for pages DMAed to is now done only on IA64
- broken multiple inclusion in include/asm-x86_64/swiotlb.h
- a missing call to mark_clean in swiotlb_sync_sg()
- a (perhaps only theoretical) issue in swiotlb_dma_supported() when
  io_tlb_end sits exactly at the end of memory

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
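The swiotlb.h and lib/swiotlb.c hunks fall outside this diffstat, so only the last item can be illustrated here. A minimal sketch, assuming io_tlb_end points one byte past the bounce buffer (the body below is illustrative, not the patch text): comparing the device mask against the last valid byte rather than the one-past-the-end address avoids a bogus wrap to physical address 0 when the bounce buffer ends exactly at the top of memory.

/* Sketch only: test the mask against the final byte of the bounce buffer. */
static int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return virt_to_phys(io_tlb_end - 1) <= mask;
}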
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/init.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8b7599808dd5..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -129,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 		set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
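A hedged caller-side illustration (example_sync_from_device, cpu_buf, and bounce_buf are made-up names; the real caller added by this patch lives in lib/swiotlb.c and is not shown in this diffstat): a swiotlb-style sync path that copies device-written data back to the CPU buffer can then mark the affected pages clean, so the I-cache flush described in the comment above is skipped when those pages are later mapped executable.

/* Illustrative sketch, not part of this patch. */
static void
example_sync_from_device(void *cpu_buf, void *bounce_buf, size_t size, int dir)
{
	if (dir == DMA_FROM_DEVICE) {
		memcpy(cpu_buf, bounce_buf, size);	/* data just written by the device */
		dma_mark_clean(cpu_buf, size);		/* full pages are now I-cache clean */
	}
}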