author     Catalin Marinas <catalin.marinas@arm.com>    2007-02-05 08:47:51 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2007-02-08 09:49:27 -0500
commit     e6a5d66f58431c66c79e236f722a5ad7dd959ef3
tree       f2153821e15aa4f5f30d02d0bc6d9a535ea168a4    /include/asm-arm/tlbflush.h
parent     9d99df4b10eef130dacb5f772cd589c625b03634
[ARM] 4129/1: Add barriers after the TLB operations
The architecture specification states that TLB operations are guaranteed to be
complete only after the execution of a DSB (Data Synchronisation Barrier,
formerly called Data Write Barrier or Drain Write Buffer). The branch target
cache invalidation is also needed. The ISB (Instruction Synchronisation
Barrier, formerly Prefetch Flush) is needed unless there will be a return from
exception before the corresponding mapping is used (i.e. user mappings).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
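For reference, dsb() and isb() are not defined in this header; they are
provided elsewhere in the tree (include/asm-arm/system.h in kernels of this
vintage). A minimal sketch of the ARMv6 CP15 encodings they correspond to,
shown for illustration only and not part of this patch:

        /*
         * Illustrative sketch, assuming ARMv6: the real definitions live
         * outside this patch.
         */
        #define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory") /* data synchronisation barrier */
        #define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")  /* flush prefetch buffer */

The branch target cache invalidation added alongside them is the
"mcr p15, 0, %0, c7, c5, 6" visible in the hunks below; isb() is only added on
the paths that flush kernel mappings, since a return from exception already
acts as an instruction barrier before a user mapping can be used.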
Diffstat (limited to 'include/asm-arm/tlbflush.h')
-rw-r--r--    include/asm-arm/tlbflush.h    50
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
index cd10a0b5f8a..08c6991dc9c 100644
--- a/include/asm-arm/tlbflush.h
+++ b/include/asm-arm/tlbflush.h
@@ -247,7 +247,7 @@ static inline void local_flush_tlb_all(void)
         const unsigned int __tlb_flag = __cpu_tlb_flags;
 
         if (tlb_flag(TLB_WB))
-                asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+                dsb();
 
         if (tlb_flag(TLB_V3_FULL))
                 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
@@ -257,6 +257,15 @@ static inline void local_flush_tlb_all(void)
                 asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
         if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
                 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+        if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+                     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+                     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+                /* flush the branch target cache */
+                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+                dsb();
+                isb();
+        }
 }
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -266,7 +275,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
         const unsigned int __tlb_flag = __cpu_tlb_flags;
 
         if (tlb_flag(TLB_WB))
-                asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+                dsb();
 
         if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
                 if (tlb_flag(TLB_V3_FULL))
@@ -285,6 +294,14 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
                 asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
         if (tlb_flag(TLB_V6_I_ASID))
                 asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+
+        if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+                     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+                     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+                /* flush the branch target cache */
+                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+                dsb();
+        }
 }
 
 static inline void
@@ -296,7 +313,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
         if (tlb_flag(TLB_WB))
-                asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero));
+                dsb();
 
         if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
                 if (tlb_flag(TLB_V3_PAGE))
@@ -317,6 +334,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                 asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
         if (tlb_flag(TLB_V6_I_PAGE))
                 asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+
+        if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+                     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+                     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+                /* flush the branch target cache */
+                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+                dsb();
+        }
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -327,7 +352,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
         kaddr &= PAGE_MASK;
 
         if (tlb_flag(TLB_WB))
-                asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+                dsb();
 
         if (tlb_flag(TLB_V3_PAGE))
                 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
@@ -347,11 +372,14 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
         if (tlb_flag(TLB_V6_I_PAGE))
                 asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
 
-        /* The ARM ARM states that the completion of a TLB maintenance
-         * operation is only guaranteed by a DSB instruction
-         */
-        if (tlb_flag(TLB_V6_U_PAGE | TLB_V6_D_PAGE | TLB_V6_I_PAGE))
-                asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+        if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+                     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+                     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+                /* flush the branch target cache */
+                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+                dsb();
+                isb();
+        }
 }
 
 /*
@@ -369,15 +397,13 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
  */
 static inline void flush_pmd_entry(pmd_t *pmd)
 {
-        const unsigned int zero = 0;
         const unsigned int __tlb_flag = __cpu_tlb_flags;
 
         if (tlb_flag(TLB_DCLEAN))
                 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd"
                         : : "r" (pmd) : "cc");
         if (tlb_flag(TLB_WB))
-                asm("mcr p15, 0, %0, c7, c10, 4 @ flush_pmd"
-                        : : "r" (zero) : "cc");
+                dsb();
 }
 
 static inline void clean_pmd_entry(pmd_t *pmd)
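
Taken together, the hunks convert every TLB_WB write-buffer drain into dsb()
and append the same tail to each flush routine. A condensed view of that tail,
reconstructed from the hunks above purely for readability:

        if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
                     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
                     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
                /* flush the branch target cache */
                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
                dsb();  /* TLB/BTB maintenance is only complete after this */
                isb();  /* only in local_flush_tlb_all() and _kernel_page() */
        }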