author     Rik van Riel <riel@surriel.com>        2018-09-25 23:58:42 -0400
committer  Peter Zijlstra <peterz@infradead.org>  2018-10-09 10:51:12 -0400
commit     016c4d92cd16f569c6485ae62b076c1a4b779536
tree       b734bacfc71fe52ac281da272536d5156ec8a252
parent     7d49b28a80b830c3ca876d33bedc58d62a78e16f

x86/mm/tlb: Add freed_tables argument to flush_tlb_mm_range

Add an argument to flush_tlb_mm_range to indicate whether page tables
are about to be freed after this TLB flush. This allows for an
optimization of flush_tlb_mm_range to skip CPUs in lazy TLB mode.

No functional changes.
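
For illustration, here is a sketch of how the new flag is meant to be
consumed. The skip itself is introduced by a later patch in this
series, and tlb_is_lazy() below is a hypothetical helper (not an
existing kernel API) that would read the target CPU's lazy-TLB state:

	/*
	 * Sketch only, not part of this patch: a CPU in lazy TLB mode
	 * will switch away from, or flush, this mm before touching
	 * user space again, so it can safely be skipped. If page
	 * tables are being freed, however, even lazy CPUs must be
	 * flushed so they cannot keep cached translations into the
	 * freed tables.
	 */
	static void flush_remote_cpus(const struct cpumask *cpumask,
				      bool freed_tables)
	{
		int cpu;

		for_each_cpu(cpu, cpumask) {
			if (!freed_tables && tlb_is_lazy(cpu))
				continue;	/* flushes at next switch */
			/* send the TLB flush IPI to @cpu */
		}
	}

Note that flush_tlb_mm() below conservatively passes true, since a
full-mm flush may immediately precede page table teardown.
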
Cc: npiggin@gmail.com
Cc: mingo@kernel.org
Cc: will.deacon@arm.com
Cc: songliubraving@fb.com
Cc: kernel-team@fb.com
Cc: luto@kernel.org
Cc: hpa@zytor.com
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20180926035844.1420-6-riel@surriel.com
---
 arch/x86/include/asm/tlb.h      |  2 +-
 arch/x86/include/asm/tlbflush.h | 10 ++++++----
 arch/x86/kernel/ldt.c           |  2 +-
 arch/x86/kernel/vm86_32.c       |  2 +-
 arch/x86/mm/tlb.c               |  3 ++-
 5 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index afbe7d1e68cf..404b8b1d44f5 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -20,7 +20,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		end = tlb->end;
 	}
 
-	flush_tlb_mm_range(tlb->mm, start, end, stride_shift);
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
 }
 
 /*
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d6c0cd9e9591..1dea9860ce5b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -536,22 +536,24 @@ struct flush_tlb_info {
 
 #define local_flush_tlb() __flush_tlb()
 
-#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+#define flush_tlb_mm(mm)						\
+		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
 
 #define flush_tlb_range(vma, start, end)				\
 	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
 			   ((vma)->vm_flags & VM_HUGETLB)		\
 				? huge_page_shift(hstate_vma(vma))	\
-				: PAGE_SHIFT)
+				: PAGE_SHIFT, false)
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned int stride_shift);
+				unsigned long end, unsigned int stride_shift,
+				bool freed_tables);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 {
-	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT);
+	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 7fdb2414ca65..ab18e0884dc6 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -273,7 +273,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	map_ldt_struct_to_user(mm);
 
 	va = (unsigned long)ldt_slot_va(slot);
-	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT);
+	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
 
 	ldt->slot = slot;
 	return 0;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 52fed70f671e..c2fd39752da8 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -199,7 +199,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	pte_unmap_unlock(pte, ptl);
 out:
 	up_write(&mm->mmap_sem);
-	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT);
+	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
 }
 
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 9fb30d27854b..14bf39fc0447 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -609,7 +609,8 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned int stride_shift)
+				unsigned long end, unsigned int stride_shift,
+				bool freed_tables)
 {
 	int cpu;
 