Diffstat (limited to 'arch/mips/mm/c-sb1.c')
 arch/mips/mm/c-sb1.c | 56 +++++++++++++++++++++++++++-------------------------
 1 file changed, 31 insertions(+), 25 deletions(-)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 4bd9ad8a5e07..16bad7c0a63f 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -155,6 +155,26 @@ static inline void __sb1_flush_icache_all(void)
 }
 
 /*
+ * Invalidate a range of the icache. The addresses are virtual, and
+ * the cache is virtually indexed and tagged. However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+	mispredict();
+	sync();
+}
+
+/*
  * Flush the icache for a given physical page. Need to writeback the
  * dcache first, then invalidate the icache. If the page isn't
  * executable, nothing is required.
@@ -173,8 +193,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long
 	/*
 	 * Bumping the ASID is probably cheaper than the flush ...
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -210,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsign
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache. The addresses are virtual, and
- * the cache is virtually indexed and tagged. However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
-}
-
 
 /*
  * Invalidate all caches on this CPU
@@ -326,9 +329,12 @@ static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
 	 * If there's a context, bump the ASID (cheaper than a flush,
 	 * since we don't know VAs!)
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(start, start + PAGE_SIZE);
+
 }
 
 #ifdef CONFIG_SMP
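
The helper this patch moves up and starts calling from both flush paths simply rounds the requested virtual range out to icache-line boundaries and issues one indexed invalidate per line, which is why it works even when the right ASID is not loaded. The standalone sketch below illustrates only that alignment and stepping arithmetic; the 32-byte line size, 16 KB index mask, and 4 KB page size are assumed example values, and the real code issues `cache_set_op(Index_Invalidate_I, ...)` cache ops rather than counting lines.

#include <stdio.h>

/*
 * Illustrative stand-ins only: on SB1 these values are determined from the
 * hardware at boot, not hard-coded.
 */
#define ICACHE_LINE_SIZE   32UL			/* assumed icache line size */
#define ICACHE_INDEX_MASK  (0x4000UL - 1)	/* assumed index bits (16 KB) */

/* Same rounding and stepping as __sb1_flush_icache_range() in the patch. */
static unsigned long flush_icache_range_sketch(unsigned long start,
						unsigned long end)
{
	unsigned long lines = 0;

	start &= ~(ICACHE_LINE_SIZE - 1);	/* round start down to a line */
	end = (end + ICACHE_LINE_SIZE - 1) & ~(ICACHE_LINE_SIZE - 1); /* round end up */

	while (start != end) {
		/* the patch does: cache_set_op(Index_Invalidate_I, start & icache_index_mask); */
		(void)(start & ICACHE_INDEX_MASK);
		lines++;
		start += ICACHE_LINE_SIZE;
	}
	return lines;
}

int main(void)
{
	/* A page-sized range, as the new else branches pass (addr, addr + PAGE_SIZE). */
	printf("%lu index ops for one misaligned 4 KB page\n",
	       flush_icache_range_sketch(0x12345678UL, 0x12345678UL + 4096UL));
	return 0;
}

Because the example start address is not line-aligned, the rounding spreads the 4 KB page over 129 lines rather than 128, which is exactly the effect of the round-down/round-up pair at the top of the loop.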