about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2013-05-23 13:43:58 -0400
committerWill Deacon <will.deacon@arm.com>2013-08-12 07:25:45 -0400
commit62cbbc42e0019aff6310259f275ae812463f8836 (patch)
tree4f31f394d44b55c7b3112e3060e81fb51a947895
parent3ea128065ed20d33bd02ff6dab689f88e38000be (diff)
ARM: tlb: reduce scope of barrier domains for TLB invalidation
Our TLB invalidation routines may require a barrier before the maintenance (in order to ensure pending page table writes are visible to the hardware walker) and barriers afterwards (in order to ensure completion of the maintenance and visibility in the instruction stream). Whilst this is expensive, the cost can be reduced somewhat by reducing the scope of the barrier instructions: - The barrier before only needs to apply to stores (pte writes) - Local ops are required only to affect the non-shareable domain - Global ops are required only to affect the inner-shareable domain This patch makes these changes for the TLB flushing code. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	arch/arm/include/asm/tlbflush.h	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 84718240340c..38960264040c 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -335,13 +335,13 @@ static inline void local_flush_tlb_all(void)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(nshst);
 
 	__local_flush_tlb_all();
 	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
 
 	if (tlb_flag(TLB_BARRIER)) {
-		dsb();
+		dsb(nsh);
 		isb();
 	}
 }
@@ -352,13 +352,13 @@ static inline void __flush_tlb_all(void)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
 
 	__local_flush_tlb_all();
 	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
 
 	if (tlb_flag(TLB_BARRIER)) {
-		dsb();
+		dsb(ish);
 		isb();
 	}
 }
@@ -388,13 +388,13 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(nshst);
 
 	__local_flush_tlb_mm(mm);
 	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
 
 	if (tlb_flag(TLB_BARRIER))
-		dsb();
+		dsb(nsh);
 }
 
 static inline void __flush_tlb_mm(struct mm_struct *mm)
@@ -402,7 +402,7 @@ static inline void __flush_tlb_mm(struct mm_struct *mm)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
 
 	__local_flush_tlb_mm(mm);
 #ifdef CONFIG_ARM_ERRATA_720789
@@ -412,7 +412,7 @@ static inline void __flush_tlb_mm(struct mm_struct *mm)
 #endif
 
 	if (tlb_flag(TLB_BARRIER))
-		dsb();
+		dsb(ish);
 }
 
 static inline void
@@ -445,13 +445,13 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(nshst);
 
 	__local_flush_tlb_page(vma, uaddr);
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
 
 	if (tlb_flag(TLB_BARRIER))
-		dsb();
+		dsb(nsh);
 }
 
 static inline void
@@ -462,7 +462,7 @@ __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
 
 	__local_flush_tlb_page(vma, uaddr);
 #ifdef CONFIG_ARM_ERRATA_720789
@@ -472,7 +472,7 @@ __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 #endif
 
 	if (tlb_flag(TLB_BARRIER))
-		dsb();
+		dsb(ish);
 }
 
 static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -498,13 +498,13 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	kaddr &= PAGE_MASK;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(nshst);
 
 	__local_flush_tlb_kernel_page(kaddr);
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
 
 	if (tlb_flag(TLB_BARRIER)) {
-		dsb();
+		dsb(nsh);
 		isb();
 	}
 }
@@ -516,13 +516,13 @@ static inline void __flush_tlb_kernel_page(unsigned long kaddr)
 	kaddr &= PAGE_MASK;
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
 
 	__local_flush_tlb_kernel_page(kaddr);
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
 
 	if (tlb_flag(TLB_BARRIER)) {
-		dsb();
+		dsb(ish);
 		isb();
 	}
 }
@@ -578,7 +578,7 @@ static inline void dummy_flush_tlb_a15_erratum(void)
 	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
 	 */
 	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-	dsb();
+	dsb(ish);
 }
 #else
 static inline int erratum_a15_798181(void)
@@ -612,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd)
 	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
 
 	if (tlb_flag(TLB_WB))
-		dsb();
+		dsb(ishst);
 }
 
 static inline void clean_pmd_entry(void *pmd)