author    Atsushi Nemoto <anemo@mba.ocn.ne.jp>  2006-08-25 04:55:31 -0400
committer Ralf Baechle <ralf@linux-mips.org>  2006-09-27 08:37:37 -0400
commit    f6502791d780b22fc147150137704a07a05ba361 (patch)
tree      687dbfcd20e60c480868eb1b64f784e8084b6e4b /arch/mips/mm
parent    a94d702049569401c65b579d0751ce282f962b41 (diff)
[MIPS] Do not use drop_mmu_context to flush another task's VIPT I-cache.
c-r4k.c and c-sb1.c use drop_mmu_context() to flush virtually tagged
I-caches, but this does not work for flushing another task's icache.
This is for example triggered by copy_to_user_page() called from
ptrace(2). Use indexed flush for such cases.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
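The logic the patch adds can be illustrated with a minimal, self-contained
userspace model (not kernel code; struct mm, active_mm and the helpers
below are hypothetical stand-ins for their kernel counterparts):

/*
 * Minimal userspace model of the dispatch this patch introduces: the
 * cheap ASID bump is only valid when the mm being flushed is the one
 * live on this CPU; otherwise fall back to an indexed flush.
 */
#include <stdio.h>

struct mm { unsigned int asid; };	/* stand-in for struct mm_struct */

static struct mm *active_mm;		/* stand-in for current->active_mm */

static void drop_mmu_context(struct mm *mm)
{
	/* Model: invalidate the ASID so the next context switch allocates
	 * a fresh one, implicitly orphaning I-cache lines tagged with the
	 * old ASID. */
	mm->asid = 0;
}

static void indexed_icache_flush(unsigned long start, unsigned long end)
{
	/* Model of an index-based flush, which hits lines regardless of
	 * the ASID they are tagged with. */
	printf("indexed I-cache flush of [%#lx, %#lx)\n", start, end);
}

static void flush_icache_page(struct mm *mm, unsigned long addr,
			      unsigned long page_size)
{
	if (mm == active_mm) {
		/* Flushing our own mm: bumping the ASID is cheaper. */
		if (mm->asid != 0)
			drop_mmu_context(mm);
	} else {
		/* Another task's mm (e.g. ptrace(2) writing through
		 * copy_to_user_page()): its cache lines carry a foreign
		 * ASID, so an ASID bump does nothing -- flush by index. */
		indexed_icache_flush(addr, addr + page_size);
	}
}

int main(void)
{
	struct mm self = { .asid = 5 }, other = { .asid = 9 };

	active_mm = &self;
	flush_icache_page(&self, 0x400000UL, 4096);	/* ASID-bump path */
	flush_icache_page(&other, 0x400000UL, 4096);	/* indexed path */
	printf("self.asid=%u, other.asid=%u\n", self.asid, other.asid);
	return 0;
}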
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c   4
-rw-r--r--  arch/mips/mm/c-sb1.c  56
2 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4e1498246343..2d729f6f6348 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -475,7 +475,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		}
 	}
 	if (exec) {
-		if (cpu_has_vtag_icache) {
+		if (cpu_has_vtag_icache && mm == current->active_mm) {
 			int cpu = smp_processor_id();
 
 			if (cpu_context(cpu, mm) != 0)
@@ -599,7 +599,7 @@ static inline void local_r4k_flush_icache_page(void *args)
 	 * We're not sure of the virtual address(es) involved here, so
 	 * we have to flush the entire I-cache.
 	 */
-	if (cpu_has_vtag_icache) {
+	if (cpu_has_vtag_icache && vma->vm_mm == current->active_mm) {
 		int cpu = smp_processor_id();
 
 		if (cpu_context(cpu, vma->vm_mm) != 0)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 4bd9ad8a5e07..16bad7c0a63f 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -155,6 +155,26 @@ static inline void __sb1_flush_icache_all(void)
 }
 
 /*
+ * Invalidate a range of the icache. The addresses are virtual, and
+ * the cache is virtually indexed and tagged. However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+	mispredict();
+	sync();
+}
+
+/*
  * Flush the icache for a given physical page. Need to writeback the
  * dcache first, then invalidate the icache. If the page isn't
  * executable, nothing is required.
@@ -173,8 +193,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long
 	/*
 	 * Bumping the ASID is probably cheaper than the flush ...
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -210,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsign
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache. The addresses are virtual, and
- * the cache is virtually indexed and tagged. However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
-}
-
 
 /*
  * Invalidate all caches on this CPU
@@ -326,9 +329,12 @@ static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
 	 * If there's a context, bump the ASID (cheaper than a flush,
 	 * since we don't know VAs!)
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(start, start + PAGE_SIZE);
+
 }
 
 #ifdef CONFIG_SMP
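
For reference, the address arithmetic in the relocated
__sb1_flush_icache_range() can be exercised in isolation with a small
userspace model; the line size and index mask below are made-up example
values, not the SB1's actual cache geometry:

#include <stdio.h>

#define ICACHE_LINE_SIZE	32UL		/* example value only */
#define ICACHE_INDEX_MASK	0x3fe0UL	/* example value only */

static void model_flush_icache_range(unsigned long start, unsigned long end)
{
	/* Round the range out to whole cache lines, as the kernel
	 * routine does. */
	start &= ~(ICACHE_LINE_SIZE - 1);
	end = (end + ICACHE_LINE_SIZE - 1) & ~(ICACHE_LINE_SIZE - 1);

	while (start != end) {
		/* An index op selects a set purely by the index bits of
		 * the address; no ASID/tag comparison is involved, so
		 * lines installed by any task are invalidated. */
		printf("Index_Invalidate_I at index %#lx\n",
		       start & ICACHE_INDEX_MASK);
		start += ICACHE_LINE_SIZE;
	}
}

int main(void)
{
	/* Unaligned range: note the rounding to whole 32-byte lines. */
	model_flush_icache_range(0x400012UL, 0x400095UL);
	return 0;
}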