Diffstat (limited to 'arch/x86/mm/pageattr.c')
 arch/x86/mm/pageattr.c | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4f8972311a77..14e6119838a6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
 	if (cpa->flags & CPA_PAGES_ARRAY) {
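The trick in fix_addr() is plain sign extension: shifting left by one discards bit 63, and the arithmetic right shift copies bit 62 back into bit 63, so an address whose top bit was flipped gets repaired while an already-canonical address passes through unchanged. A minimal userspace sketch of the same computation (the sample addresses are illustrative, not taken from this change; the arithmetic right shift on a negative value is what gcc/clang do on x86-64):

	#include <stdio.h>

	/* Same body as the kernel's fix_addr() above, CONFIG_X86_64 case. */
	static unsigned long fix_addr(unsigned long addr)
	{
		return (long)(addr << 1) >> 1;
	}

	int main(void)
	{
		unsigned long decoy     = 0x7fff888000001000UL; /* top bit flipped, non-canonical */
		unsigned long canonical = 0xffff888000001000UL; /* already canonical */

		printf("%lx -> %lx\n", decoy, fix_addr(decoy));         /* bit 63 restored */
		printf("%lx -> %lx\n", canonical, fix_addr(canonical)); /* no-op */
		return 0;
	}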
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
 	unsigned int i;
 
 	for (i = 0; i < cpa->numpages; i++)
-		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
 	}
 	mb();
 }
@@ -1627,29 +1650,6 @@ out:
 	return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-	return (long)(addr << 1) >> 1;
-#else
-	return addr;
-#endif
-}
-
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,