author     Peter Zijlstra <peterz@infradead.org>  2013-06-05 06:26:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-06-05 21:07:26 -0400
commit     29eb77825cc7da8d45b642de2de3d423dc8a363f (patch)
tree       689c3c9f8b34f8023899ba8c230ea04c4dc21e54 /mm/memory.c
parent     2c95523c0f0ca7c2f157a07f0c03b6bbba13fee6 (diff)
arch, mm: Remove tlb_fast_mode()
Since the introduction of the preemptible mmu_gather, TLB fast mode has been
broken. TLB fast mode relies on there being absolutely no concurrency;
it frees pages first and invalidates TLBs later.
However, now we can get concurrency and stuff goes *bang*.
This patch removes all tlb_fast_mode() code; it was found to be the better
option versus trying to patch the hole by entangling TLB invalidation with
the scheduler.
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
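
[Editor's note: the following is a minimal userspace sketch of the ordering
problem described above, not kernel code; the functions below are illustrative
stubs standing in for the real mm/memory.c and TLB helpers.]

/*
 * Sketch of why fast mode is unsafe once the mmu_gather can be preempted:
 * it frees pages before invalidating the TLB, so a concurrently running
 * context can still reach a freed (and possibly reused) page through a
 * stale translation.  The batched path keeps the safe order: invalidate
 * first, free afterwards.
 */
#include <stdio.h>

static void flush_tlb_stub(void)      { puts("invalidate stale TLB entries"); }
static void free_page_stub(int page)  { printf("free page %d\n", page); }

/* Old fast-mode ordering: free first, invalidate later. */
static void unmap_fast_mode(int page)
{
	free_page_stub(page);	/* page may be reused while... */
	flush_tlb_stub();	/* ...a stale TLB entry still maps it */
}

/* Ordering kept after this patch: invalidate before freeing. */
static void unmap_batched(int page)
{
	flush_tlb_stub();
	free_page_stub(page);
}

int main(void)
{
	unmap_fast_mode(1);	/* racy once mmu_gather became preemptible */
	unmap_batched(2);	/* always safe */
	return 0;
}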
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 9 ---------
1 file changed, 0 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882fbd72..61a262b08e53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->start = -1UL;
 	tlb->end = 0;
 	tlb->need_flush = 0;
-	tlb->fast_mode = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
 	tlb->local.max = ARRAY_SIZE(tlb->__pages);
@@ -244,9 +243,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_table_flush(tlb);
 #endif
 
-	if (tlb_fast_mode(tlb))
-		return;
-
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
 		batch->nr = 0;
@@ -288,11 +284,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
 	VM_BUG_ON(!tlb->need_flush);
 
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1;	/* avoid calling tlb_flush_mmu() */
-	}
-
 	batch = tlb->active;
 	batch->pages[batch->nr++] = page;
 	if (batch->nr == batch->max) {