about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@kernel.org>2017-04-22 03:01:21 -0400
committerIngo Molnar <mingo@kernel.org>2017-04-26 04:02:06 -0400
commitce27374fabf553153c3f53efcaa9bfab9216bd8c (patch)
treed3284cd99641f4c9f25b028936ca2929fc3b4997
parent29961b59a51f8c6838a26a45e871a7ed6771809b (diff)
x86/mm: Make flush_tlb_mm_range() more predictable
I'm about to rewrite the function almost completely, but first I want to get a functional change out of the way. Currently, if flush_tlb_mm_range() does not flush the local TLB at all, it will never do individual page flushes on remote CPUs. This seems to be an accident, and preserving it will be awkward. Let's change it first so that any regressions in the rewrite will be easier to bisect and so that the rewrite can attempt to change no visible behavior at all.

The fix is simple: we can simply avoid short-circuiting the calculation of base_pages_to_flush.

As a side effect, this also eliminates a potential corner case: if tlb_single_page_flush_ceiling == TLB_FLUSH_ALL, flush_tlb_mm_range() could have ended up flushing the entire address space one page at a time.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Dave Hansen <dave.hansen@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/4b29b771d9975aad7154c314534fec235618175a.1492844372.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- arch/x86/mm/tlb.c | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 92ec37f517ab..9db9260a5e9f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -309,6 +309,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
309 unsigned long base_pages_to_flush = TLB_FLUSH_ALL; 309 unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
310 310
311 preempt_disable(); 311 preempt_disable();
312
313 if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
314 base_pages_to_flush = (end - start) >> PAGE_SHIFT;
315 if (base_pages_to_flush > tlb_single_page_flush_ceiling)
316 base_pages_to_flush = TLB_FLUSH_ALL;
317
312 if (current->active_mm != mm) { 318 if (current->active_mm != mm) {
313 /* Synchronize with switch_mm. */ 319 /* Synchronize with switch_mm. */
314 smp_mb(); 320 smp_mb();
@@ -325,15 +331,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
325 goto out; 331 goto out;
326 } 332 }
327 333
328 if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
329 base_pages_to_flush = (end - start) >> PAGE_SHIFT;
330
331 /* 334 /*
332 * Both branches below are implicit full barriers (MOV to CR or 335 * Both branches below are implicit full barriers (MOV to CR or
333 * INVLPG) that synchronize with switch_mm. 336 * INVLPG) that synchronize with switch_mm.
334 */ 337 */
335 if (base_pages_to_flush > tlb_single_page_flush_ceiling) { 338 if (base_pages_to_flush == TLB_FLUSH_ALL) {
336 base_pages_to_flush = TLB_FLUSH_ALL;
337 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 339 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
338 local_flush_tlb(); 340 local_flush_tlb();
339 } else { 341 } else {