author    Peter Zijlstra <peterz@infradead.org>	2013-06-05 06:26:50 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2013-06-05 21:07:26 -0400
commit    29eb77825cc7da8d45b642de2de3d423dc8a363f (patch)
tree      689c3c9f8b34f8023899ba8c230ea04c4dc21e54 /include
parent    2c95523c0f0ca7c2f157a07f0c03b6bbba13fee6 (diff)
arch, mm: Remove tlb_fast_mode()
Since the introduction of preemptible mmu_gather, TLB fast mode has been
broken. TLB fast mode relies on there being absolutely no concurrency: it
frees pages first and invalidates TLBs later. Now that we can get
concurrency, stuff goes *bang*.

This patch removes all tlb_fast_mode() code; removing it was judged a better
option than trying to patch the hole by entangling TLB invalidation with the
scheduler.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
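To make the ordering hazard concrete, here is a minimal standalone C sketch.
Every name in it (unmap_fast, unmap_batched, free_page_now, flush_tlb) is a
simplified stand-in chosen for illustration, not one of the kernel's actual
helpers.

/*
 * Standalone sketch of the hazard described above; compiles with any C
 * compiler. All names are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct page { int id; };
struct mmu_gather { int need_flush; };

static void flush_tlb(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;
	printf("TLB entries invalidated\n");
}

static void free_page_now(struct page *page)
{
	printf("page %d freed, may be reused immediately\n", page->id);
}

/*
 * Fast mode (removed by this patch): free first, invalidate later.
 * Between the two calls another CPU can still hit a stale TLB entry
 * that maps the already-freed page.
 */
static void unmap_fast(struct mmu_gather *tlb, struct page *page)
{
	free_page_now(page);
	flush_tlb(tlb);
}

/*
 * Batched mode (kept): invalidate first, then free, so no CPU can
 * reach the page through a stale translation once it is released.
 */
static void unmap_batched(struct mmu_gather *tlb, struct page *page)
{
	flush_tlb(tlb);
	free_page_now(page);
}

int main(void)
{
	struct mmu_gather tlb = { .need_flush = 1 };
	struct page p = { .id = 42 };

	unmap_fast(&tlb, &p);		/* unsafe ordering */
	tlb.need_flush = 1;
	unmap_batched(&tlb, &p);	/* safe ordering */
	return 0;
}

The batched path only releases pages after the TLB flush, which is why it
tolerates concurrency and fast mode does not.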
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/tlb.h | 17
1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b1b1fa6ffffe..13821c339a41 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -97,11 +97,9 @@ struct mmu_gather {
 	unsigned long		start;
 	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
-				fast_mode  : 1; /* No batching */
-
 	/* we are in the middle of an operation to clear
 	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
+				fullmm : 1,
 	/* we have performed an operation which
 	 * requires a complete flush of the tlb */
 				need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
-	return tlb->fast_mode;
-#else
-	/*
-	 * For UP we don't need to worry about TLB flush
-	 * and page free order so much..
-	 */
-	return 1;
-#endif
-}
-
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
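For context, a kernel-style usage sketch of the three declarations the hunk
leaves in place. This is illustrative pseudocode under simplified
assumptions: zap_range is a hypothetical caller invented here, and it is not
runnable outside a kernel tree.

/*
 * Hypothetical caller, for illustration only: drives the generic
 * mmu_gather API whose declarations survive the hunk above.
 */
static void zap_range(struct mm_struct *mm, unsigned long start,
		      unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, false);  /* begin a gather, not full-mm */
	/* ... clear PTEs, queueing the freed pages into the batch ... */
	tlb_flush_mmu(&tlb);              /* invalidate TLBs, then free pages */
	tlb_finish_mmu(&tlb, start, end); /* final flush and teardown */
}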