 arch/x86/include/asm/tlbflush.h |  6 ------
 arch/x86/mm/tlb.c               |  2 +-
 mm/rmap.c                       | 28 ++++++----------------
 3 files changed, 8 insertions, 28 deletions
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c24b4224d439..1fde8d580a5b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
 
 #endif /* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {		\
-	inc_irq_stat(irq_tlb_count);	\
-	local_flush_tlb();		\
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
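The flush_tlb_local() wrapper deleted above existed only so the batched-unmap IPI handler in mm/rmap.c (removed later in this patch) could bump irq_tlb_count alongside the flush; with that caller gone the macro has no users. For orientation, a local non-global TLB flush on x86 amounts to a CR3 reload; a minimal sketch of that idea, not the kernel's exact implementation (which goes through native_write_cr3() and the paravirt hooks):

/*
 * Sketch only: rewriting CR3 with its current value invalidates
 * all non-global TLB entries on this CPU. This is roughly what
 * local_flush_tlb() boils down to on native x86.
 */
static inline void local_flush_tlb_sketch(void)
{
	unsigned long cr3;

	asm volatile("mov %%cr3, %0" : "=r" (cr3));
	asm volatile("mov %0, %%cr3" : : "r" (cr3) : "memory");
}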
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5fb6adaaa796..fe9b9f776361 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -104,7 +104,7 @@ static void flush_tlb_func(void *info)
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
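This one-line change gives flush_tlb_info a wildcard mm: previously the IPI handler bailed out whenever flush_mm differed from the CPU's active_mm, so a NULL flush_mm would have skipped the flush entirely; now NULL means "flush no matter which mm is loaded", which is exactly what the rmap.c hunk below relies on when it passes NULL to flush_tlb_others(). A condensed sketch of the handler's new control flow (struct abbreviated to the fields used here, accounting elided):

/* Abbreviated from arch/x86/mm/tlb.c; only the relevant fields. */
struct flush_tlb_info {
	struct mm_struct *flush_mm;	/* NULL now means "any mm" */
	unsigned long flush_start;
	unsigned long flush_end;
};

static void flush_tlb_func_sketch(void *info)
{
	struct flush_tlb_info *f = info;

	/* Bail out only for a targeted flush aimed at some other mm. */
	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	/* ... count the event and flush the local TLB ... */
}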
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
-	}
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
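Reassembled from the context and '+' lines of the hunk above, the rewritten function reads as follows; the prologue and the closing put_cpu() fall outside the hunk, so they are sketched from the surrounding code rather than shown verbatim:

void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int cpu;

	if (!tlb_ubc->flush_required)
		return;

	cpu = get_cpu();

	/* Flush this CPU inline if it has stale entries batched... */
	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	/* ...and hand every other batched CPU to the arch flush path. */
	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
	put_cpu();
}

The net effect: the local CPU no longer goes through its own IPI-style callback, and remote CPUs are flushed via the ordinary flush_tlb_others()/paravirt machinery instead of a private smp_call_function_many() handler, which is why percpu_flush_tlb_batch_pages() and the flush_tlb_local() macro could both be dropped.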