Diffstat (limited to 'arch/arm/include/asm/tlb.h')
 arch/arm/include/asm/tlb.h | 27 ++++-----------------------
 1 file changed, 4 insertions(+), 23 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee26..bdf2b8458ec1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE	8
 
 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;
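
With tlb_fast_mode() removed, __tlb_remove_page() no longer has an immediate-free path: every unmapped page is batched in tlb->pages and only released by tlb_flush_mmu() after the TLB has been invalidated, which is what keeps SMP and ARMv7 speculative-prefetch systems from reaching freed pages through stale TLB entries. Below is a minimal userspace sketch of that gather/flush/free pattern. The names mirror the kernel API in the diff above, but the scaffolding is an illustrative assumption, not kernel code: printf stands in for the real TLB flush and page free, void * stands in for struct page, and __tlb_alloc_page()'s switch to a larger batch is omitted for brevity.

/* Sketch of the unconditional gather/flush/free pattern (not kernel code). */
#include <stdio.h>

#define LOCAL_PAGES 8	/* mirrors MMU_GATHER_BUNDLE */

struct mmu_gather {
	void **pages;			/* batch of pages awaiting free */
	unsigned int nr;		/* pages currently batched */
	unsigned int max;		/* capacity of pages[] */
	void *local[LOCAL_PAGES];	/* on-stack fallback array */
};

static void tlb_flush(struct mmu_gather *tlb)
{
	/* Stand-in for the hardware TLB invalidation. */
	printf("tlb_flush: invalidating entries for %u pages\n", tlb->nr);
}

static void free_pages_and_swap_cache(void **pages, unsigned int nr)
{
	/* Stand-in for releasing the batched pages to the allocator. */
	for (unsigned int i = 0; i < nr; i++)
		printf("freeing page %p\n", pages[i]);
}

static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	/*
	 * Order matters: invalidate the TLB first, then free the pages,
	 * so no other CPU can still reach a freed page through a stale
	 * TLB entry.
	 */
	tlb_flush(tlb);
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
}

/* Batch a page; a zero return tells the caller to run tlb_flush_mmu(). */
static int __tlb_remove_page(struct mmu_gather *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
	return tlb->max - tlb->nr;
}

int main(void)
{
	struct mmu_gather tlb = { .max = LOCAL_PAGES };
	int dummy[20];

	tlb.pages = tlb.local;

	for (int i = 0; i < 20; i++) {
		if (!__tlb_remove_page(&tlb, &dummy[i]))
			tlb_flush_mmu(&tlb);	/* batch full: flush, then free */
	}
	tlb_flush_mmu(&tlb);	/* drain the final partial batch */
	return 0;
}

The return-value convention matches the diff: __tlb_remove_page() returns the number of slots left in the batch, so a zero return tells the caller the batch is full and tlb_flush_mmu() must run before any further pages are removed.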