author     Peter Zijlstra <peterz@infradead.org>    2018-12-03 12:03:51 -0500
committer  Ingo Molnar <mingo@kernel.org>           2018-12-17 12:54:28 -0500
commit     fe0937b24ff5d7b343b9922201e469f9a6009d9d (patch)
tree       cf1135d30172decce155f63d648bb4e27fd8e1f2
parent     83b4e39146aa70913580966e0f2b78b7c3492760 (diff)
x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array() into a single cpa_flush() function
Note that the cache flush loop in cpa_flush_*() is identical when we
use __cpa_addr(); further observe that flush_tlb_kernel_range() is a
special case of the cpa_flush_array() TLB invalidation code.

This then means the two functions are virtually identical. Fold these
two functions into a single cpa_flush() call.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.559855600@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
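[Editor's illustration, not part of the patch.] The changelog's argument is that, for the plain (non-array) case, __cpa_addr() is assumed to walk the range linearly one page at a time, so per-page TLB invalidation covers exactly the span the old cpa_flush_range() handed to flush_tlb_kernel_range(), and the clflush loop after it is the same in both paths. The standalone C sketch below uses a simplified, hypothetical stand-in for struct cpa_data and a hard-coded PAGE_SIZE purely to show that equivalence; none of the names are the kernel's.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative value; stands in for the kernel constant */

/* Hypothetical, trimmed-down stand-in for struct cpa_data (non-array case only). */
struct cpa_sketch {
	unsigned long vaddr;	/* base virtual address of the range */
	unsigned long numpages;	/* number of pages to flush */
};

/* Assumed non-array behaviour of __cpa_addr(): a linear walk from vaddr. */
static unsigned long cpa_addr_sketch(const struct cpa_sketch *cpa, unsigned long idx)
{
	return cpa->vaddr + idx * PAGE_SIZE;
}

int main(void)
{
	struct cpa_sketch cpa = { .vaddr = 0xffffffff81000000UL, .numpages = 4 };
	unsigned long i;

	/*
	 * Per-page invalidation driven by the linear __cpa_addr() walk visits
	 * exactly [vaddr, vaddr + numpages * PAGE_SIZE), i.e. the same span
	 * the old cpa_flush_range() passed to flush_tlb_kernel_range().
	 */
	for (i = 0; i < cpa.numpages; i++)
		printf("flush TLB for page at %#lx\n", cpa_addr_sketch(&cpa, i));

	printf("equivalent range flush: [%#lx, %#lx)\n",
	       cpa.vaddr, cpa.vaddr + cpa.numpages * PAGE_SIZE);

	return 0;
}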
-rw-r--r--  arch/x86/mm/pageattr.c | 92
1 file changed, 18 insertions(+), 74 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 12b69263e501..85ef53b86fa0 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -304,51 +304,7 @@ static void cpa_flush_all(unsigned long cache)
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static bool __inv_flush_all(int cache)
-{
-	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-
-	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-		cpa_flush_all(cache);
-		return true;
-	}
-
-	return false;
-}
-
-static void cpa_flush_range(unsigned long start, int numpages, int cache)
-{
-	unsigned int i, level;
-	unsigned long addr;
-
-	WARN_ON(PAGE_ALIGN(start) != start);
-
-	if (__inv_flush_all(cache))
-		return;
-
-	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
-	if (!cache)
-		return;
-
-	/*
-	 * We only need to flush on one CPU,
-	 * clflush is a MESI-coherent instruction that
-	 * will cause all other CPUs to flush the same
-	 * cachelines:
-	 */
-	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
-		pte_t *pte = lookup_address(addr, &level);
-
-		/*
-		 * Only flush present addresses:
-		 */
-		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range((void *) addr, PAGE_SIZE);
-	}
-}
-
-void __cpa_flush_array(void *data)
+void __cpa_flush_tlb(void *data)
 {
 	struct cpa_data *cpa = data;
 	unsigned int i;
@@ -357,33 +313,31 @@ void __cpa_flush_array(void *data)
 		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
 }
 
-static void cpa_flush_array(struct cpa_data *cpa, int cache)
+static void cpa_flush(struct cpa_data *data, int cache)
 {
+	struct cpa_data *cpa = data;
 	unsigned int i;
 
-	if (cpa_check_flush_all(cache))
+	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
+
+	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		cpa_flush_all(cache);
 		return;
+	}
 
 	if (cpa->numpages <= tlb_single_page_flush_ceiling)
-		on_each_cpu(__cpa_flush_array, cpa, 1);
+		on_each_cpu(__cpa_flush_tlb, cpa, 1);
 	else
 		flush_tlb_all();
 
 	if (!cache)
 		return;
 
-	/*
-	 * We only need to flush on one CPU,
-	 * clflush is a MESI-coherent instruction that
-	 * will cause all other CPUs to flush the same
-	 * cachelines:
-	 */
 	for (i = 0; i < cpa->numpages; i++) {
 		unsigned long addr = __cpa_addr(cpa, i);
 		unsigned int level;
-		pte_t *pte;
 
-		pte = lookup_address(addr, &level);
+		pte_t *pte = lookup_address(addr, &level);
 
 		/*
 		 * Only flush present addresses:
@@ -1698,7 +1652,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
-	unsigned long baddr = 0;
 
 	memset(&cpa, 0, sizeof(cpa));
 
@@ -1732,11 +1685,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			 */
 			WARN_ON_ONCE(1);
 		}
-		/*
-		 * Save address for cache flush. *addr is modified in the call
-		 * to __change_page_attr_set_clr() below.
-		 */
-		baddr = make_addr_canonical_again(*addr);
 	}
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -1784,11 +1732,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 		goto out;
 	}
 
-	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
-		cpa_flush_array(&cpa, cache);
-	else
-		cpa_flush_range(baddr, numpages, cache);
-
+	cpa_flush(&cpa, cache);
 out:
 	return ret;
 }
@@ -2097,18 +2041,18 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	cpa_flush_range(addr, numpages, 1);
+	cpa_flush(&cpa, 1);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
 	/*
-	 * After changing the encryption attribute, we need to flush TLBs
-	 * again in case any speculative TLB caching occurred (but no need
-	 * to flush caches again). We could just use cpa_flush_all(), but
-	 * in case TLB flushing gets optimized in the cpa_flush_range()
-	 * path use the same logic as above.
+	 * After changing the encryption attribute, we need to flush TLBs again
+	 * in case any speculative TLB caching occurred (but no need to flush
+	 * caches again). We could just use cpa_flush_all(), but in case TLB
+	 * flushing gets optimized in the cpa_flush() path use the same logic
+	 * as above.
 	 */
-	cpa_flush_range(addr, numpages, 0);
+	cpa_flush(&cpa, 0);
 
 	return ret;
 }