author		Linus Torvalds <torvalds@linux-foundation.org>	2019-02-10 12:57:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-02-10 12:57:42 -0500
commit		aadaa8061189a9e5d8a1327b328453d663e8cbc9 (patch)
tree		8ca6a12e3512e5f784432991f38c43200e569b76
parent		73a4c52184141943146ebbf07de4dca27141f21c (diff)
parent		20e55bc17dd01f13cec0eb17e76e9511b23963ef (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
"A handful of fixes:
- Fix an MCE corner case bug/crash found via MCE injection testing
- Fix 5-level paging boot crash
- Fix MCE recovery cache invalidation bug
- Fix regression on Xen guests caused by a recent PMD level mremap
speedup optimization"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Make set_pmd_at() paravirt aware
x86/mm/cpa: Fix set_mce_nospec()
x86/boot/compressed/64: Do not corrupt EDX on EFER.LME=1 setting
x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()
-rw-r--r--	arch/x86/boot/compressed/head_64.S	 2
-rw-r--r--	arch/x86/include/asm/pgtable.h		 2
-rw-r--r--	arch/x86/kernel/cpu/mce/core.c		 1
-rw-r--r--	arch/x86/mm/pageattr.c			50
4 files changed, 29 insertions, 26 deletions
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index f105ae8651c9..f62e347862cc 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src)
 3:
 	/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
 	pushl	%ecx
+	pushl	%edx
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	btsl	$_EFER_LME, %eax
 	wrmsr
+	popl	%edx
 	popl	%ecx
 
 	/* Enable PAE and LA57 (if required) paging modes */
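Note: RDMSR returns the selected MSR in EDX:EAX and WRMSR writes the same pair back, so the original sequence silently clobbered whatever the trampoline had live in %edx; the added pushl/popl %edx preserves it across the EFER update. A minimal user-space sketch of that register convention (illustration only, not kernel code; the EFER contents below are a made-up example):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t efer = 0x500;                   /* made-up EFER contents      */
		uint32_t eax  = (uint32_t)efer;          /* rdmsr: low 32 bits  -> EAX */
		uint32_t edx  = (uint32_t)(efer >> 32);  /* rdmsr: high 32 bits -> EDX */

		eax |= 1u << 8;                          /* btsl $_EFER_LME, %eax      */

		/* wrmsr consumes exactly this EDX:EAX pair */
		printf("EDX:EAX = %08x:%08x\n", edx, eax);
		return 0;
	}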
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 40616e805292..2779ace16d23 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-	native_set_pmd(pmdp, pmd);
+	set_pmd(pmdp, pmd);
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
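Note: set_pmd() is the paravirt-aware entry point: with CONFIG_PARAVIRT it dispatches through an ops table that a Xen PV guest overrides, while native_set_pmd() always performs a direct write and so bypasses the hypervisor hook, which is what broke Xen guests after the PMD-level mremap speedup. A stand-alone model of that indirection (simplified placeholder names, not the kernel's real pv_ops layout):

	#include <stdint.h>
	#include <stdio.h>

	typedef struct { uint64_t pmd; } pmd_t;

	static void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
	{
		pmdp->pmd = pmd.pmd;                    /* plain memory write */
	}

	static void xen_set_pmd(pmd_t *pmdp, pmd_t pmd)
	{
		/* a PV guest would go through a hypercall here */
		printf("hypervisor-mediated PMD write: %#llx\n",
		       (unsigned long long)pmd.pmd);
		pmdp->pmd = pmd.pmd;
	}

	static struct {
		void (*set_pmd)(pmd_t *, pmd_t);
	} pv_mmu_ops = { .set_pmd = xen_set_pmd };      /* patched at boot on Xen */

	static void set_pmd(pmd_t *pmdp, pmd_t pmd)
	{
		pv_mmu_ops.set_pmd(pmdp, pmd);          /* paravirt-aware path */
	}

	int main(void)
	{
		pmd_t entry = { .pmd = 0x1234067 };
		pmd_t slot  = { 0 };

		set_pmd(&slot, entry);                  /* goes through the hook */
		native_set_pmd(&slot, entry);           /* skips it */
		return 0;
	}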
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 672c7225cb1b..6ce290c506d9 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 			quirk_no_way_out(i, m, regs);
 
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			m->bank = i;
 			mce_read_aux(m, i);
 			*msg = tmp;
 			return 1;
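Note: the no-way-out scan walks the MCA banks looking for an error severe enough to panic on; the added assignment records which bank that error was found in before its auxiliary registers are read, so later reporting can attribute it to the right bank. A toy model of the scan (hypothetical types and values; only the "remember the bank index before reading it" step mirrors the patch):

	#include <stdio.h>

	struct mce { int bank; unsigned long status; };

	int main(void)
	{
		/* pretend bank 2 reports a fatal error */
		unsigned long bank_status[4] = { 0, 0, 0xbad, 0 };
		struct mce m = { .bank = -1 };
		int i;

		for (i = 0; i < 4; i++) {
			if (!bank_status[i])
				continue;
			m.status = bank_status[i];
			m.bank = i;     /* the added line: remember where the error lives */
			break;
		}

		printf("fatal error in bank %d\n", m.bank);
		return 0;
	}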
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4f8972311a77..14e6119838a6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
 	if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
 	unsigned int i;
 
 	for (i = 0; i < cpa->numpages; i++)
-		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
 	}
 	mb();
 }
@@ -1627,29 +1650,6 @@ out:
 	return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-	return (long)(addr << 1) >> 1;
-#else
-	return addr;
-#endif
-}
-
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,