author    Peter Zijlstra <peterz@infradead.org>    2018-12-03 12:03:49 -0500
committer Ingo Molnar <mingo@kernel.org>           2018-12-17 12:54:26 -0500
commit    935f5839827ef54b53406e80906f7c355eb73c1b (patch)
tree      7ec68e734244764f504c5f42246185107b9d0e64
parent    5fe26b7a8f4693d532c7a3c3632e47e7d7016238 (diff)
x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation
Instead of punting and doing flush_tlb_all(), do the same as
flush_tlb_kernel_range() does and use single page invalidations.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.430001980@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
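To make the new heuristic concrete: cpa_flush_array() now invalidates each page individually (via __flush_tlb_one_kernel()) whenever the page count is at or below tlb_single_page_flush_ceiling, and only falls back to a full TLB flush above that. Here is a minimal userspace C sketch of that decision; the flush functions are stubs standing in for the real kernel primitives, and the addresses and array are made up for illustration:

#include <stdio.h>

/* Kernel default cutoff (see arch/x86/mm/tlb.c below). */
static unsigned long tlb_single_page_flush_ceiling = 33;

/* Stub for __flush_tlb_one_kernel(): one INVLPG-style invalidation. */
static void flush_one_page(unsigned long addr)
{
	printf("invalidate page at %#lx\n", addr);
}

/* Stub for flush_tlb_all(): drop every TLB entry. */
static void flush_everything(void)
{
	printf("full TLB flush\n");
}

static void flush_pages(const unsigned long *addrs, unsigned long numpages)
{
	unsigned long i;

	/* Few pages: per-page invalidation is cheaper than a full flush. */
	if (numpages <= tlb_single_page_flush_ceiling) {
		for (i = 0; i < numpages; i++)
			flush_one_page(addrs[i]);
	} else {
		flush_everything();
	}
}

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x2000, 0x5000, 0x9000 };

	flush_pages(addrs, 4);	/* 4 <= 33: four single-page invalidations */
	return 0;
}

The cutoff of 33 pages is the existing tlb_single_page_flush_ceiling default in arch/x86/mm/tlb.c; the diff below merely un-statics that variable so pageattr.c can reuse the same threshold.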
-rw-r--r--  arch/x86/mm/mm_internal.h   2
-rw-r--r--  arch/x86/mm/pageattr.c     42
-rw-r--r--  arch/x86/mm/tlb.c           4
3 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 4e1f6e1b8159..319bde386d5f 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -19,4 +19,6 @@ extern int after_bootmem;
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
 
+extern unsigned long tlb_single_page_flush_ceiling;
+
 #endif /* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index afa98b7b6050..351874259a71 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,6 +26,8 @@
 #include <asm/pat.h>
 #include <asm/set_memory.h>
 
+#include "mm_internal.h"
+
 /*
  * The current flushing context - we pass it instead of 5 arguments:
  */
@@ -346,16 +348,26 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-static void cpa_flush_array(unsigned long baddr, unsigned long *start,
-			    int numpages, int cache,
-			    int in_flags, struct page **pages)
+void __cpa_flush_array(void *data)
 {
-	unsigned int i, level;
+	struct cpa_data *cpa = data;
+	unsigned int i;
 
-	if (__inv_flush_all(cache))
+	for (i = 0; i < cpa->numpages; i++)
+		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+}
+
+static void cpa_flush_array(struct cpa_data *cpa, int cache)
+{
+	unsigned int i;
+
+	if (cpa_check_flush_all(cache))
 		return;
 
-	flush_tlb_all();
+	if (cpa->numpages <= tlb_single_page_flush_ceiling)
+		on_each_cpu(__cpa_flush_array, cpa, 1);
+	else
+		flush_tlb_all();
 
 	if (!cache)
 		return;
@@ -366,15 +378,11 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
 	 * will cause all other CPUs to flush the same
 	 * cachelines:
 	 */
-	for (i = 0; i < numpages; i++) {
-		unsigned long addr;
+	for (i = 0; i < cpa->numpages; i++) {
+		unsigned long addr = __cpa_addr(cpa, i);
+		unsigned int level;
 		pte_t *pte;
 
-		if (in_flags & CPA_PAGES_ARRAY)
-			addr = (unsigned long)page_address(pages[i]);
-		else
-			addr = start[i];
-
 		pte = lookup_address(addr, &level);
 
 		/*
@@ -1771,12 +1779,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 		goto out;
 	}
 
-	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
-		cpa_flush_array(baddr, addr, numpages, cache,
-				cpa.flags, pages);
-	} else {
+	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+		cpa_flush_array(&cpa, cache);
+	else
 		cpa_flush_range(baddr, numpages, cache);
-	}
 
 out:
 	return ret;
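Note that __cpa_flush_array() takes a void * because on_each_cpu() hands the same opaque info pointer to the callback on every CPU; the callback recovers the typed struct by assignment. A small userspace sketch of that callback pattern, using a hypothetical cpa_data_sketch struct as a stand-in for the kernel's struct cpa_data:

#include <stdio.h>

/* Hypothetical stand-in for the fields of struct cpa_data used here. */
struct cpa_data_sketch {
	unsigned long *pages;
	unsigned long numpages;
};

/* Same void (*func)(void *info) shape that on_each_cpu() expects. */
static void flush_array_cb(void *data)
{
	struct cpa_data_sketch *cpa = data;
	unsigned long i;

	for (i = 0; i < cpa->numpages; i++)
		printf("invalidate page at %#lx\n", cpa->pages[i]);
}

int main(void)
{
	unsigned long pages[] = { 0x1000, 0x2000 };
	struct cpa_data_sketch cpa = { pages, 2 };

	/* In the kernel this would be: on_each_cpu(__cpa_flush_array, &cpa, 1); */
	flush_array_cb(&cpa);
	return 0;
}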
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 03b6b4c2238d..999d6d8f0bef 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,6 +15,8 @@
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
 
+#include "mm_internal.h"
+
 /*
  * TLB flushing, formerly SMP-only
  *	c/o Linus Torvalds.
@@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  *
  * This is in units of pages.
  */
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			unsigned long end, unsigned int stride_shift,