author		Nadav Amit <namit@vmware.com>	2016-04-01 17:31:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-04-01 18:03:37 -0400
commit		858eaaa711700ce4595e039441e239e56d7b9514 (patch)
tree		b126024f3245f28f163088c111fc1942322d2239
parent		18c98243ddf05a1827ad2c359c5ac051101e7ff7 (diff)
mm/rmap: batched invalidations should use existing api
The recently introduced batched invalidations mechanism uses its own
mechanism for shootdown. However, it accounts interrupts incorrectly
(e.g., inc_irq_stat is called for local invalidations), emits the wrong
trace-points (e.g., TLB_REMOTE_SHOOTDOWN for local invalidations), and
may break some platforms as it bypasses the invalidation mechanisms of
Xen and SGI UV.

This patch reuses the existing TLB flushing mechanisms instead. We use
NULL as the mm to indicate that a global invalidation is required.

Fixes: 72b252aed506b8 ("mm: send one IPI per CPU to TLB flush all entries after unmapping pages")
Signed-off-by: Nadav Amit <namit@vmware.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
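As a rough, non-authoritative illustration of the flow the patch switches to (flush the local TLB when the current CPU is in the batch cpumask, then hand every other CPU in the mask to the existing remote-flush path with a NULL mm), here is a minimal userspace model. NR_CPUS, the bool-array cpumask, and the printf bodies are stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for local_flush_tlb(): flush this CPU's entire TLB. */
static void local_flush_tlb(void)
{
	puts("full TLB flush on current CPU");
}

/* Stand-in for flush_tlb_others(mask, NULL, 0, TLB_FLUSH_ALL): a NULL
 * mm means a global invalidation, so every remote CPU in the mask does
 * a full flush instead of filtering by mm. */
static void flush_tlb_others(const bool *mask, int self)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask[cpu] && cpu != self)
			printf("IPI: full TLB flush on CPU %d\n", cpu);
}

/* Model of the post-patch try_to_unmap_flush() flow. */
static void try_to_unmap_flush_model(bool *cpumask, int this_cpu)
{
	if (cpumask[this_cpu])
		local_flush_tlb();		/* local case, no IPI */
	flush_tlb_others(cpumask, this_cpu);	/* remote CPUs only */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		cpumask[cpu] = false;		/* cpumask_clear() */
}

int main(void)
{
	bool mask[NR_CPUS] = { true, false, true, true };
	try_to_unmap_flush_model(mask, 0);	/* local flush + IPIs to CPUs 2 and 3 */
	return 0;
}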
-rw-r--r--	arch/x86/include/asm/tlbflush.h	6
-rw-r--r--	arch/x86/mm/tlb.c	2
-rw-r--r--	mm/rmap.c	28
3 files changed, 8 insertions(+), 28 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c24b4224d439..1fde8d580a5b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
 
 #endif /* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {		\
-	inc_irq_stat(irq_tlb_count);	\
-	local_flush_tlb();		\
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5fb6adaaa796..fe9b9f776361 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -104,7 +104,7 @@ static void flush_tlb_func(void *info)
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
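A self-contained sketch of the predicate this hunk changes, assuming nothing beyond what the diff shows: with a NULL flush_mm the early return is skipped, so a receiving CPU always flushes; with a specific mm it still flushes only when that mm is active locally. The should_flush name and opaque mm_struct are illustration-only stand-ins:

#include <assert.h>
#include <stddef.h>

struct mm_struct;	/* opaque stand-in */

/* Model of the check in flush_tlb_func() after this patch: skip only
 * when a specific mm was requested and it is not active on this CPU. */
static int should_flush(const struct mm_struct *flush_mm,
			const struct mm_struct *active_mm)
{
	return flush_mm == NULL || flush_mm == active_mm;
}

int main(void)
{
	struct mm_struct *a = (struct mm_struct *)0x1;
	struct mm_struct *b = (struct mm_struct *)0x2;

	assert(should_flush(NULL, a));	/* NULL mm: global, never skipped */
	assert(should_flush(a, a));	/* mm active here: flush */
	assert(!should_flush(a, b));	/* foreign mm: early return */
	return 0;
}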
diff --git a/mm/rmap.c b/mm/rmap.c
index c399a0d41b31..395e314b7996 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
-	}
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
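The accumulation half of the batch is not part of this diff. Purely as a hypothetical sketch of how the state that try_to_unmap_flush() clears above could get populated during unmap (the struct layout, the batch_add_cpu name, and the bool-array cpumask are all invented for illustration):

#include <stdbool.h>

#define NR_CPUS 4

/* Hypothetical stand-in for the batch cleared at the end of the hunk
 * above: a cpumask of CPUs that may cache stale entries, plus the
 * flush_required and writable flags. */
struct unmap_batch {
	bool cpumask[NR_CPUS];
	bool flush_required;
	bool writable;
};

/* While unmapping, record each CPU the mm has run on; a writable PTE
 * makes the eventual flush mandatory before the page is reused. */
static void batch_add_cpu(struct unmap_batch *b, int cpu, bool pte_writable)
{
	b->cpumask[cpu] = true;
	b->flush_required = true;
	if (pte_writable)
		b->writable = true;
}

int main(void)
{
	struct unmap_batch b = { 0 };
	batch_add_cpu(&b, 1, true);	/* CPU 1 now needs a flush */
	return b.flush_required ? 0 : 1;
}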