author     Peter Zijlstra <peterz@infradead.org>           2013-06-05 06:26:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-06-05 21:07:26 -0400
commit     29eb77825cc7da8d45b642de2de3d423dc8a363f
tree       689c3c9f8b34f8023899ba8c230ea04c4dc21e54 /arch/arm
parent     2c95523c0f0ca7c2f157a07f0c03b6bbba13fee6
arch, mm: Remove tlb_fast_mode()
Since the introduction of the preemptible mmu_gather, TLB fast mode has been
broken. TLB fast mode relies on there being absolutely no concurrency;
it frees pages first and invalidates TLBs later.
However, now we can get concurrency and stuff goes *bang*.
This patch removes all tlb_fast_mode() code; that was found to be the better
option vs. trying to patch the hole by entangling TLB invalidation with
the scheduler.
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
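To make the ordering problem concrete, here is a minimal userspace sketch of the two orderings. The helpers (free_page_now(), flush_stale_tlb_entries()) are hypothetical stand-ins for the real kernel operations, not kernel APIs:

#include <stdio.h>

/* Hypothetical stand-ins for the real kernel operations. */
static void free_page_now(int pfn)        { printf("free page %d\n", pfn); }
static void flush_stale_tlb_entries(void) { printf("flush TLB\n"); }

/*
 * Fast mode: free first, flush later.  Only safe with absolutely no
 * concurrency; once the mmu_gather can be preempted, another task (or
 * an ARMv7 speculative prefetch) can reach the page through a stale
 * TLB entry after the page has been reused.
 */
static void fast_mode_teardown(int pfn)
{
	free_page_now(pfn);          /* page may be reused here ...     */
	flush_stale_tlb_entries();   /* ... while stale mappings remain */
}

/* Gather mode (all that remains after this patch): flush, then free. */
static void gather_mode_teardown(int pfn)
{
	flush_stale_tlb_entries();
	free_page_now(pfn);
}

int main(void)
{
	fast_mode_teardown(1);
	gather_mode_teardown(2);
	return 0;
}

The removed #if block in the diff below shows that fast mode was already disabled on SMP and ARMv7; with a preemptible mmu_gather, even a UP kernel can be preempted inside that window, so the free-then-flush ordering is never safe.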
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/tlb.h  27 ++++-----------------------
1 file changed, 4 insertions(+), 23 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee26..bdf2b8458ec1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE	8
 
 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;
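After this change, __tlb_remove_page() always gathers the page and returns the space left in the batch; pages are freed only from tlb_flush_mmu(), after the TLB flush. Below is a self-contained sketch of that caller contract; the types and helpers are stand-ins for the real ones in <asm/tlb.h>, everything here is illustrative rather than the kernel's actual API surface:

#include <stdio.h>

/* Stand-in types so the sketch compiles on its own. */
struct page { int pfn; };
struct mmu_gather { struct page *pages[8]; unsigned int nr, max; };

static void tlb_flush(struct mmu_gather *tlb)
{
	printf("flush TLB for %u gathered pages\n", tlb->nr);
}

static void free_gathered_pages(struct page **pages, unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++)
		printf("free page %d\n", pages[i]->pfn);
}

/* Mirrors the patched tlb_flush_mmu(): flush first, then free. */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	free_gathered_pages(tlb->pages, tlb->nr);
	tlb->nr = 0;
}

/* Mirrors the patched __tlb_remove_page(): gather unconditionally
 * and report how much room is left in the batch. */
static int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	return tlb->max - tlb->nr;
}

int main(void)
{
	struct mmu_gather tlb = { .nr = 0, .max = 8 };
	struct page pages[20];

	/* Caller contract: when the batch fills (return value 0),
	 * flush-and-free before gathering any more pages. */
	for (int i = 0; i < 20; i++) {
		pages[i].pfn = i;
		if (!__tlb_remove_page(&tlb, &pages[i]))
			tlb_flush_mmu(&tlb);
	}
	if (tlb.nr)
		tlb_flush_mmu(&tlb);
	return 0;
}

This mirrors what the generic tlb_remove_page() wrapper does for callers: gather until the batch is full, then flush and free, always in that order.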