aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRik van Riel <riel@surriel.com>2018-07-16 15:03:35 -0400
committerIngo Molnar <mingo@kernel.org>2018-07-17 03:35:33 -0400
commit64482aafe55fc7e84d0741c356f8176ee7bde357 (patch)
tree35f803b840221e2cced8735a1b553124ae7f651d
parentac0315896970d8589291e9d8a1569fc65967b7f1 (diff)
x86/mm/tlb: Only send page table free TLB flush to lazy TLB CPUs
CPUs in !is_lazy have either received TLB flush IPIs earlier on during the munmap (when the user memory was unmapped), or have context switched and reloaded during that stage of the munmap. Page table free TLB flushes only need to be sent to CPUs in lazy TLB mode, which TLB contents might not yet be up to date yet. Tested-by: Song Liu <songliubraving@fb.com> Signed-off-by: Rik van Riel <riel@surriel.com> Acked-by: Dave Hansen <dave.hansen@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: efault@gmx.de Cc: kernel-team@fb.com Cc: luto@kernel.org Link: http://lkml.kernel.org/r/20180716190337.26133-6-riel@surriel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/mm/tlb.c43
1 file changed, 39 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 26542cc17043..e4156e37aa71 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -712,15 +712,50 @@ void tlb_flush_remove_tables_local(void *arg)
712 } 712 }
713} 713}
714 714
715static void mm_fill_lazy_tlb_cpu_mask(struct mm_struct *mm,
716 struct cpumask *lazy_cpus)
717{
718 int cpu;
719
720 for_each_cpu(cpu, mm_cpumask(mm)) {
721 if (!per_cpu(cpu_tlbstate.is_lazy, cpu))
722 cpumask_set_cpu(cpu, lazy_cpus);
723 }
724}
725
715void tlb_flush_remove_tables(struct mm_struct *mm) 726void tlb_flush_remove_tables(struct mm_struct *mm)
716{ 727{
717 int cpu = get_cpu(); 728 int cpu = get_cpu();
729 cpumask_var_t lazy_cpus;
730
731 if (cpumask_any_but(mm_cpumask(mm), cpu) >= nr_cpu_ids) {
732 put_cpu();
733 return;
734 }
735
736 if (!zalloc_cpumask_var(&lazy_cpus, GFP_ATOMIC)) {
737 /*
738 * If the cpumask allocation fails, do a brute force flush
739 * on all the CPUs that have this mm loaded.
740 */
741 smp_call_function_many(mm_cpumask(mm),
742 tlb_flush_remove_tables_local, (void *)mm, 1);
743 put_cpu();
744 return;
745 }
746
718 /* 747 /*
719 * XXX: this really only needs to be called for CPUs in lazy TLB mode. 748 * CPUs with !is_lazy either received a TLB flush IPI while the user
749 * pages in this address range were unmapped, or have context switched
750 * and reloaded %CR3 since then.
751 *
752 * Shootdown IPIs at page table freeing time only need to be sent to
753 * CPUs that may have out of date TLB contents.
720 */ 754 */
721 if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) 755 mm_fill_lazy_tlb_cpu_mask(mm, lazy_cpus);
722 smp_call_function_many(mm_cpumask(mm), tlb_flush_remove_tables_local, (void *)mm, 1); 756 smp_call_function_many(lazy_cpus,
723 757 tlb_flush_remove_tables_local, (void *)mm, 1);
758 free_cpumask_var(lazy_cpus);
724 put_cpu(); 759 put_cpu();
725} 760}
726 761