author     Ralf Baechle <ralf@linux-mips.org>   2007-10-15 11:30:24 -0400
committer  Ralf Baechle <ralf@linux-mips.org>   2007-10-16 13:23:49 -0400
commit     985c30ef4d7c2a4f0e979a507a7e2f7f09b096c3 (patch)
tree       8f5a447adaa49b6efb7485e8b69b2743eca3ee4b /arch/mips/mm
parent     736fad17b89e5e718908abb76ae9bce210a9d5d4 (diff)
[MIPS] Fix aliasing bug in copy_user_highpage, take 2.
Turns out b868868ae0f7272228c95cc760338ffe35bb739d wasn't quite right. When called for a page that isn't marked dirty, it would artificially create an alias instead of doing the obvious thing and accessing the page via KSEG0. The same issue also exists in copy_to_user_page and copy_from_user_page, which was causing the machine to die under rare circumstances, for example when running ps, if the BUG_ON() assertion added by the earlier fix was triggered.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
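In essence, all three helpers now follow the same pattern. The commented sketch below restates the new copy_to_user_page body from the diff (copy_from_user_page is symmetric); it is an illustration of the logic, not the literal kernel source. The alias-avoiding kmap_coherent() mapping is taken only when the page is actually mapped into user space; otherwise the copy goes through the kernel's normal mapping and the page is flagged D-cache dirty.

	/*
	 * Sketch of the fixed logic (condensed from the diff below; the
	 * I-cache flush for VM_EXEC vmas is unchanged and omitted here).
	 */
	if (cpu_has_dc_aliases && page_mapped(page)) {
		/*
		 * The page has a user-space mapping, so a write through an
		 * arbitrary kernel address could leave a stale alias in the
		 * D-cache.  kmap_coherent() maps the page at a virtual
		 * address with the same cache colour as the user mapping.
		 */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		/*
		 * No user mapping exists, so accessing the page through the
		 * kernel's own mapping (KSEG0) cannot create a harmful
		 * alias; just record that the D-cache may now disagree
		 * with a future user mapping.
		 */
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}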
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/init.c | 19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5240432e6d1d..110ee7656b41 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -211,7 +211,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
-	if (cpu_has_dc_aliases && !Page_dcache_dirty(from)) {
+	if (cpu_has_dc_aliases && page_mapped(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
@@ -234,12 +234,15 @@ void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases) {
+	if (cpu_has_dc_aliases && page_mapped(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent();
-	} else
+	} else {
 		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
@@ -250,13 +253,15 @@ void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases) {
-		void *vfrom =
-			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	if (cpu_has_dc_aliases && page_mapped(page)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent();
-	} else
+	} else {
 		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 }
 
 EXPORT_SYMBOL(copy_from_user_page);