about summary refs log tree commit diff stats
path: root/arch/sh/mm/cache.c
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-09-08 03:21:00 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-09-08 03:21:00 -0400
commit6e4154d4c2dd3d7e61d19ddd2527322ce34c2f5a (patch)
tree5c5eab6df31e0dedefc57eae22251267f234b08c /arch/sh/mm/cache.c
parent0906a3ad33a254094fb74828e3ddb9af8771a6da (diff)
sh: Use more aggressive dcache purging in kmap teardown.
This fixes up a number of outstanding issues observed with old mappings on the same colour hanging around. This requires some more optimal handling, but is a safe fallback until all of the corner cases have been handled.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/cache.c')
-rw-r--r--arch/sh/mm/cache.c11
1 file changed, 6 insertions, 5 deletions
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 8e4a8d1ac4a9..35c37b7f717a 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -97,7 +97,7 @@ void copy_user_highpage(struct page *to, struct page *from,
97 } 97 }
98 98
99 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) 99 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
100 __flush_wback_region(vto, PAGE_SIZE); 100 __flush_purge_region(vto, PAGE_SIZE);
101 101
102 kunmap_atomic(vto, KM_USER1); 102 kunmap_atomic(vto, KM_USER1);
103 /* Make sure this page is cleared on other CPU's too before using it */ 103 /* Make sure this page is cleared on other CPU's too before using it */
@@ -112,7 +112,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
112 clear_page(kaddr); 112 clear_page(kaddr);
113 113
114 if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) 114 if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
115 __flush_wback_region(kaddr, PAGE_SIZE); 115 __flush_purge_region(kaddr, PAGE_SIZE);
116 116
117 kunmap_atomic(kaddr, KM_USER0); 117 kunmap_atomic(kaddr, KM_USER0);
118} 118}
@@ -134,7 +134,7 @@ void __update_cache(struct vm_area_struct *vma,
134 unsigned long addr = (unsigned long)page_address(page); 134 unsigned long addr = (unsigned long)page_address(page);
135 135
136 if (pages_do_alias(addr, address & PAGE_MASK)) 136 if (pages_do_alias(addr, address & PAGE_MASK))
137 __flush_wback_region((void *)addr, PAGE_SIZE); 137 __flush_purge_region((void *)addr, PAGE_SIZE);
138 } 138 }
139 } 139 }
140} 140}
@@ -149,10 +149,11 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
149 void *kaddr; 149 void *kaddr;
150 150
151 kaddr = kmap_coherent(page, vmaddr); 151 kaddr = kmap_coherent(page, vmaddr);
152 __flush_wback_region((void *)kaddr, PAGE_SIZE); 152 /* XXX.. For now kunmap_coherent() does a purge */
153 /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
153 kunmap_coherent(kaddr); 154 kunmap_coherent(kaddr);
154 } else 155 } else
155 __flush_wback_region((void *)addr, PAGE_SIZE); 156 __flush_purge_region((void *)addr, PAGE_SIZE);
156 } 157 }
157} 158}
158 159