author	Peter Zijlstra <peterz@infradead.org>	2018-12-03 12:03:52 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-12-17 12:54:29 -0500
commit	c38116bb940ae37f51fccd315b420ee5961dcb76 (patch)
tree	d0d2bf0dccb8f6c2a2460b9f9b77d20af5143642
parent	fe0937b24ff5d7b343b9922201e469f9a6009d9d (diff)
x86/mm/cpa: Better use CLFLUSHOPT
Currently we issue an MFENCE before and after flushing a range. This
means that if we flush a bunch of single page ranges -- like with the
cpa array -- we issue a whole bunch of superfluous MFENCEs.

Reorganize the code a little to avoid this.

[ mingo: capitalize instructions, tweak changelog and comments. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.626999883@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
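[ Illustration, not part of the patch: a minimal sketch of the fencing
  pattern before and after -- names follow the patch, bodies are
  abbreviated. With N single-page ranges the old scheme executes 2*N
  fences, the new one executes 2. ]

  /* Before: every call pays both fences, even when invoked in a loop. */
  void clflush_cache_range(void *vaddr, unsigned int size)
  {
  	mb();		/* order prior stores before the CLFLUSHOPTs */
  	/* ... CLFLUSHOPT each cache line in [vaddr, vaddr + size) ... */
  	mb();		/* complete the CLFLUSHOPTs before later accesses */
  }

  /* After: cpa_flush() brackets the whole array with one fence pair
   * and calls the unfenced helper once per page. */
  mb();
  for (i = 0; i < cpa->numpages; i++)
  	clflush_cache_range_opt((void *)__cpa_addr(cpa, i), PAGE_SIZE);
  mb();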
-rw-r--r--	arch/x86/mm/pageattr.c | 29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 85ef53b86fa0..7d05149995dc 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -251,15 +251,7 @@ static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
  * Flushing functions
  */
 
-/**
- * clflush_cache_range - flush a cache range with clflush
- * @vaddr:	virtual start address
- * @size:	number of bytes to flush
- *
- * clflushopt is an unordered instruction which needs fencing with mfence or
- * sfence to avoid ordering issues.
- */
-void clflush_cache_range(void *vaddr, unsigned int size)
+static void clflush_cache_range_opt(void *vaddr, unsigned int size)
 {
 	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
 	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
@@ -268,11 +260,22 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 	if (p >= vend)
 		return;
 
-	mb();
-
 	for (; p < vend; p += clflush_size)
 		clflushopt(p);
+}
 
+/**
+ * clflush_cache_range - flush a cache range with clflush
+ * @vaddr:	virtual start address
+ * @size:	number of bytes to flush
+ *
+ * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
+ * SFENCE to avoid ordering issues.
+ */
+void clflush_cache_range(void *vaddr, unsigned int size)
+{
+	mb();
+	clflush_cache_range_opt(vaddr, size);
 	mb();
 }
 EXPORT_SYMBOL_GPL(clflush_cache_range);
@@ -333,6 +336,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 	if (!cache)
 		return;
 
+	mb();
 	for (i = 0; i < cpa->numpages; i++) {
 		unsigned long addr = __cpa_addr(cpa, i);
 		unsigned int level;
@@ -343,8 +347,9 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
 	}
+	mb();
 }
 
 static bool overlaps(unsigned long r1_start, unsigned long r1_end,
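[ Illustration, not from the patch: the same batching idea in user space
  with the compiler intrinsics. Assumes a CPU with CLFLUSHOPT, a 64-byte
  cache line, and a build with gcc -mclflushopt; flush_range_opt() and
  flush_pages_batched() are hypothetical names for this sketch. ]

  #include <immintrin.h>	/* _mm_clflushopt(), _mm_mfence() */
  #include <stddef.h>
  #include <stdint.h>

  #define CACHE_LINE 64	/* assumed; the kernel reads the real size from CPUID */

  /* Unfenced flush of [p, p + size), analogous to clflush_cache_range_opt(). */
  static void flush_range_opt(void *p, size_t size)
  {
  	char *line = (char *)((uintptr_t)p & ~(uintptr_t)(CACHE_LINE - 1));
  	char *end  = (char *)p + size;

  	for (; line < end; line += CACHE_LINE)
  		_mm_clflushopt(line);
  }

  /* Batched flush: one fence pair for all ranges, not one pair per range. */
  static void flush_pages_batched(void **pages, size_t n, size_t page_size)
  {
  	_mm_mfence();		/* order earlier stores before the flushes */
  	for (size_t i = 0; i < n; i++)
  		flush_range_opt(pages[i], page_size);
  	_mm_mfence();		/* complete the flushes before later accesses */
  }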