aboutsummaryrefslogtreecommitdiffstats
path: root/lib/ioremap.c
diff options
context:
space:
mode:
authorHaavard Skinnemoen <hskinnemoen@atmel.com>2006-10-01 02:29:14 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-10-01 03:39:31 -0400
commitdb71daabad0821996483dfe309c4bc81d6755a70 (patch)
tree10e60d91178ccb0d1fd1c47321fc6c096db926e5 /lib/ioremap.c
parent74588d8ba34ff1bda027cfa737972af01ab00c8b (diff)
[PATCH] Generic ioremap_page_range: flush_cache_vmap

The existing implementation of ioremap_page_range(), which was taken
from i386, does this:

    flush_cache_all();
    /* modify page tables */
    flush_tlb_all();

I think this is a bit defensive, so this patch changes the generic
implementation to do:

    /* modify page tables */
    flush_cache_vmap(start, end);

instead, which is similar to what vmalloc() does. This should still be
correct because we never modify existing PTEs.

According to James Bottomley:

    The problem the flush_tlb_all() is trying to solve is to avoid
    stale tlb entries in the ioremap area. We're just being
    conservative by flushing on both map and unmap. Technically what
    vmalloc/vfree does (only flush the tlb on unmap) is just fine
    because it means that the only tlb entries in the remap area must
    belong to in-use mappings.

Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Andi Kleen <ak@suse.de>
Cc: <linux-m32r@ml.linux-m32r.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Kyle McMartin <kyle@parisc-linux.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'lib/ioremap.c')
-rw-r--r--lib/ioremap.c4
1 files changed, 1 insertions, 3 deletions
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 29c810ec9813..99fa277f9f7b 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -76,8 +76,6 @@ int ioremap_page_range(unsigned long addr,
76 76
77 BUG_ON(addr >= end); 77 BUG_ON(addr >= end);
78 78
79 flush_cache_all();
80
81 start = addr; 79 start = addr;
82 phys_addr -= addr; 80 phys_addr -= addr;
83 pgd = pgd_offset_k(addr); 81 pgd = pgd_offset_k(addr);
@@ -88,7 +86,7 @@ int ioremap_page_range(unsigned long addr,
88 break; 86 break;
89 } while (pgd++, addr = next, addr != end); 87 } while (pgd++, addr = next, addr != end);
90 88
91 flush_tlb_all(); 89 flush_cache_vmap(start, end);
92 90
93 return err; 91 return err;
94} 92}