From cde14bbfb3aa79b479db35bd29e6c083513d8614 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Mon, 5 Feb 2007 18:46:40 -0800
Subject: [IA64] swiotlb bug fixes

This patch fixes
- marking I-cache clean of pages DMAed to now only done for IA64
- broken multiple inclusion in include/asm-x86_64/swiotlb.h
- missing call to mark_clean in swiotlb_sync_sg()
- a (perhaps only theoretical) issue in swiotlb_dma_supported() when
  io_tlb_end is exactly at the end of memory

Signed-off-by: Jan Beulich
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 arch/ia64/mm/init.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'arch/ia64/mm')

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8b7599808dd5..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -129,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
--
cgit v1.2.2
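
Note (not part of the patch): the loop in dma_mark_clean() only marks pages that lie entirely inside the DMAed buffer, so a buffer that starts or ends mid-page leaves its partial pages to be flushed later by lazy_mmu_prot_update(). The following is a minimal userspace sketch of that same arithmetic; the kernel symbols (PAGE_ALIGN, virt_to_page, set_bit, PG_arch_1) are replaced by plain stand-ins, and the page size and addresses are assumed example values.

	/*
	 * Illustrative sketch only: mimic the page-selection arithmetic of
	 * dma_mark_clean().  PAGE_SIZE_SIM and the addresses below are
	 * assumptions for demonstration, not kernel values.
	 */
	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE_SIM 16384UL	/* assumed 16 KiB page size */

	/* Round up to the next page boundary, like the kernel's PAGE_ALIGN(). */
	static unsigned long page_align_sim(unsigned long addr)
	{
		return (addr + PAGE_SIZE_SIM - 1) & ~(PAGE_SIZE_SIM - 1);
	}

	/* Report each page that lies entirely within [addr, addr + size). */
	static void mark_clean_sim(unsigned long addr, size_t size)
	{
		unsigned long pg_addr = page_align_sim(addr);
		unsigned long end = addr + size;

		while (pg_addr + PAGE_SIZE_SIM <= end) {
			printf("would mark page at 0x%lx clean\n", pg_addr);
			pg_addr += PAGE_SIZE_SIM;
		}
	}

	int main(void)
	{
		/* A buffer starting mid-page: the leading partial page is skipped. */
		mark_clean_sim(0x10001000UL, 3 * PAGE_SIZE_SIM);
		return 0;
	}

Run as-is, this prints only the two pages fully covered by the transfer, mirroring how the kernel loop skips partially written pages.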