author		Paul Mundt <lethal@linux-sh.org>	2009-09-08 03:21:00 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-09-08 03:21:00 -0400
commit		6e4154d4c2dd3d7e61d19ddd2527322ce34c2f5a (patch)
tree		5c5eab6df31e0dedefc57eae22251267f234b08c /arch/sh
parent		0906a3ad33a254094fb74828e3ddb9af8771a6da (diff)
sh: Use more aggressive dcache purging in kmap teardown.
This fixes up a number of outstanding issues observed with old mappings on
the same colour hanging around. This requires some more optimal handling,
but is a safe fallback until all of the corner cases have been handled.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
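The change replaces plain write-back flushes with purges (write back plus invalidate) wherever a kernel-side mapping may alias a user-side mapping of the same page on a virtually indexed dcache. As a minimal user-space sketch of the colour check behind pages_do_alias(), the snippet below assumes a 4 KB page size and a hypothetical 16 KB direct-mapped dcache (two colour bits above the page offset); pages_do_alias_sketch() and CACHE_ALIAS_MASK are illustrative names, not the kernel's.

/*
 * Illustrative sketch only -- not the kernel's implementation.  Assumes a
 * 4 KB page size and a hypothetical 16 KB direct-mapped, virtually indexed
 * dcache, so two index ("colour") bits sit above the page offset.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define CACHE_ALIAS_MASK	0x3000UL	/* assumed colour bits 12-13 */

/*
 * Two mappings of one physical page alias when their colour bits differ:
 * the same data can then sit in two different cache lines at once.
 */
static int pages_do_alias_sketch(unsigned long a, unsigned long b)
{
	return ((a ^ b) & CACHE_ALIAS_MASK) != 0;
}

int main(void)
{
	unsigned long kmap_vaddr = 0xc0003000UL;	/* kernel-side mapping */
	unsigned long user_vaddr = 0x00401000UL;	/* user-side mapping   */

	if (pages_do_alias_sketch(kmap_vaddr, user_vaddr & PAGE_MASK))
		/*
		 * Writing the lines back leaves clean-but-stale copies of the
		 * old colour in the cache; purging (write back + invalidate)
		 * removes them, which is what this patch switches to.
		 */
		printf("mappings alias: purge rather than just write back\n");

	return 0;
}

The design point of the patch is the safe fallback: purging in kunmap_coherent() itself is heavier than strictly necessary, but it ensures no stale lines of the old colour survive the teardown until the corner cases are handled more optimally.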
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/mm/cache.c	11
-rw-r--r--	arch/sh/mm/kmap.c	3
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 8e4a8d1ac4a9..35c37b7f717a 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -97,7 +97,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	}
 
 	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_wback_region(vto, PAGE_SIZE);
+		__flush_purge_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
 	/* Make sure this page is cleared on other CPU's too before using it */
@@ -112,7 +112,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 	clear_page(kaddr);
 
 	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-		__flush_wback_region(kaddr, PAGE_SIZE);
+		__flush_purge_region(kaddr, PAGE_SIZE);
 
 	kunmap_atomic(kaddr, KM_USER0);
 }
@@ -134,7 +134,7 @@ void __update_cache(struct vm_area_struct *vma,
 			unsigned long addr = (unsigned long)page_address(page);
 
 			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
+				__flush_purge_region((void *)addr, PAGE_SIZE);
 		}
 	}
 }
@@ -149,10 +149,11 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 			void *kaddr;
 
 			kaddr = kmap_coherent(page, vmaddr);
-			__flush_wback_region((void *)kaddr, PAGE_SIZE);
+			/* XXX.. For now kunmap_coherent() does a purge */
+			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
 			kunmap_coherent(kaddr);
 		} else
-			__flush_wback_region((void *)addr, PAGE_SIZE);
+			__flush_purge_region((void *)addr, PAGE_SIZE);
 	}
 }
 
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index c52cd8c40a64..16e01b5fed04 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -54,6 +54,9 @@ void kunmap_coherent(void *kvaddr)
 		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
 		enum fixed_addresses idx = __virt_to_fix(vaddr);
 
+		/* XXX.. Kill this later, here for sanity at the moment.. */
+		__flush_purge_region((void *)vaddr, PAGE_SIZE);
+
 		pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
 		local_flush_tlb_one(get_asid(), vaddr);
 	}