diff options
author | Will Deacon <will.deacon@arm.com> | 2015-10-06 13:46:26 -0400 |
---|---|---|
committer | Catalin Marinas <catalin.marinas@arm.com> | 2015-10-07 06:56:21 -0400 |
commit | 5a7862e83000ccfd36db927c6f060458fe271157 (patch) | |
tree | 418e7e0fb2163f8fdc15026832cab1f65a7017ee | |
parent | f3e002c24e1f3b66f6e392ecd6928b5d04672c54 (diff) |
arm64: tlbflush: avoid flushing when fullmm == 1
The TLB gather code sets fullmm=1 when tearing down the entire address
space for an mm_struct on exit or execve. Given that the ASID allocator
will never re-allocate a dirty ASID, this flushing is not needed and can
simply be avoided in the flushing code.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r-- | arch/arm64/include/asm/tlb.h | 26 |
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index d6e6b6660380..ffdaea7954bb 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,17 +37,21 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	if (tlb->fullmm) {
-		flush_tlb_mm(tlb->mm);
-	} else {
-		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-		/*
-		 * The intermediate page table levels are already handled by
-		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
-		 * TLBI is sufficient here.
-		 */
-		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
-	}
+	struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+
+	/*
+	 * The ASID allocator will either invalidate the ASID or mark
+	 * it as used.
+	 */
+	if (tlb->fullmm)
+		return;
+
+	/*
+	 * The intermediate page table levels are already handled by
+	 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+	 * TLBI is sufficient here.
+	 */
+	__flush_tlb_range(&vma, tlb->start, tlb->end, true);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,