author		Ralf Baechle <ralf@linux-mips.org>	2008-02-16 17:34:25 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2008-02-19 12:01:31 -0500
commit		9a74b3eb22f2d67a5681301f52aca5b7703382c8 (patch)
tree		3ef7b8713edfccc96ad1ce57a431828656d92d82 /arch/mips/mm
parent		c42d95d6c49ce9c678a9d10aeb3f526c850d66dc (diff)
[MIPS] Fix buggy invocations of kmap_coherent()
kmap_coherent will only work correctly if the page it is called on is not marked dirty. If it's dirty, the kernel address of the page should be used instead of a temporary mapping.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
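In practice the rule above means each kmap_coherent() call site is guarded: take the temporary mapping only when the page is mapped and its dcache is clean, and fall back to the page's kernel address otherwise. The sketch below is illustrative only, not kernel source: it reuses the helpers visible in the diff (kmap_coherent(), kunmap_coherent(), page_mapped(), Page_dcache_dirty(), pages_do_alias(), flush_data_cache_page(), page_address()) and assumes the usual MIPS kernel context rather than being a standalone compilable unit; the wrapper name flush_aliased_page() is made up for illustration.

/*
 * Illustrative sketch only: flush_aliased_page() is a hypothetical name,
 * but the guard mirrors the pattern this patch applies to
 * __flush_anon_page() in the diff below.
 */
static void flush_aliased_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (!pages_do_alias(addr, vmaddr))
		return;		/* no dcache alias, nothing to flush */

	if (page_mapped(page) && !Page_dcache_dirty(page)) {
		/* Clean, mapped page: a temporary coherent mapping is safe. */
		void *kaddr = kmap_coherent(page, vmaddr);

		flush_data_cache_page((unsigned long) kaddr);
		kunmap_coherent();
	} else {
		/* Dirty page: flush through the kernel address instead. */
		flush_data_cache_page(addr);
	}
}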
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/cache.c	15
-rw-r--r--	arch/mips/mm/init.c	9
2 files changed, 16 insertions, 8 deletions
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 81f30ac2bff9..6a24651971df 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -92,12 +92,17 @@ EXPORT_SYMBOL(__flush_dcache_page);
 
 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
-	if (pages_do_alias((unsigned long)page_address(page), vmaddr)) {
-		void *kaddr;
+	unsigned long addr = (unsigned long) page_address(page);
 
-		kaddr = kmap_coherent(page, vmaddr);
-		flush_data_cache_page((unsigned long)kaddr);
-		kunmap_coherent();
+	if (pages_do_alias(addr, vmaddr)) {
+		if (page_mapped(page) && !Page_dcache_dirty(page)) {
+			void *kaddr;
+
+			kaddr = kmap_coherent(page, vmaddr);
+			flush_data_cache_page((unsigned long)kaddr);
+			kunmap_coherent();
+		} else
+			flush_data_cache_page(addr);
 	}
 }
 
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 480dec04f552..c7aed133d11d 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -211,7 +211,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
-	if (cpu_has_dc_aliases && page_mapped(from)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
@@ -234,7 +235,8 @@ void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases && page_mapped(page)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent();
@@ -253,7 +255,8 @@ void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases && page_mapped(page)) {
+	if (cpu_has_dc_aliases &&
+	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent();