| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | Linus Torvalds <torvalds@linux-foundation.org>   | 2016-02-28 10:49:23 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>   | 2016-02-28 10:49:23 -0500 |
| commit    | 4b696dcb1a55e40648ad0eec4af991c72f945a85 (patch) |                           |
| tree      | 6783476066ceec0439d612e029cd4eebd7787ea4         |                           |
| parent    | 76c03f0f5d496be6a6b81df5f6e16551c07b3c0a (diff)  |                           |
| parent    | 9bf148cb0812595bfdf5100bd2c07e9bec9c6ef5 (diff)  |                           |
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:

"This update contains:

- Hopefully the last ASM CLAC fixups
- A fix for the Quark family related to the IMR lock, which makes kexec
  work again
- An off-by-one fix in the MPX code. Ironic, isn't it?
- A fix for X86_PAE which addresses, once more, an unsigned long vs
  phys_addr_t hiccup"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mpx: Fix off-by-one comparison with nr_registers
x86/mm: Fix slow_virt_to_phys() for X86_PAE again
x86/entry/compat: Add missing CLAC to entry_INT80_32
x86/entry/32: Add an ASM_CLAC to entry_SYSENTER_32
x86/platform/intel/quark: Change the kernel's IMR lock bit to false
```
 arch/x86/entry/entry_32.S           |  1 +
 arch/x86/entry/entry_64_compat.S    |  1 +
 arch/x86/mm/mpx.c                   |  2 +-
 arch/x86/mm/pageattr.c              | 14 ++++++++++----
 arch/x86/platform/intel-quark/imr.c |  4 ++--
 5 files changed, 15 insertions(+), 7 deletions(-)
```
```diff
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 77d8c5112900..bb3e376d0f33 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,7 @@ sysenter_past_esp:
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 	pushfl				/* pt_regs->flags (except IF = 0) */
+	ASM_CLAC			/* Clear AC after saving FLAGS */
 	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 	pushl	$__USER_CS		/* pt_regs->cs */
 	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
```
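The placement matters: AC is cleared only after `pushfl` has captured the user's FLAGS into `pt_regs->flags`, so a task that legitimately runs with alignment checking enabled gets its AC bit back on return to user mode. A minimal model of that save-then-clear ordering — plain C rather than the kernel's asm, with `AC_BIT` standing in for the EFLAGS.AC position:

```c
#include <stdint.h>
#include <stdio.h>

#define AC_BIT (UINT32_C(1) << 18)	/* EFLAGS.AC bit position on x86 */

int main(void)
{
	uint32_t eflags = AC_BIT;	/* user entered the kernel with AC=1 */

	uint32_t saved_flags = eflags;	/* pushfl: snapshot into pt_regs->flags */
	eflags &= ~AC_BIT;		/* ASM_CLAC: kernel now runs with AC=0 */

	/* The saved copy still carries the user's AC, so the eventual
	 * return-to-user path restores it intact. */
	printf("saved AC=%d, kernel AC=%d\n",
	       !!(saved_flags & AC_BIT), !!(eflags & AC_BIT));
	return 0;
}
```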
```diff
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ff1c6d61f332..3c990eeee40b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
 	 * Interrupts are off on entry.
 	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	ASM_CLAC			/* Do this early to minimize exposure */
 	SWAPGS
 
 	/*
```
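Both entry fixes close the same SMAP hole: on entry from user mode, EFLAGS.AC may still be set, and while it is, SMAP allows kernel-mode reads and writes of user pages to succeed silently. A hedged sketch of what `ASM_CLAC` amounts to — the real macro is alternatives-patched so it becomes a NOP on CPUs without X86_FEATURE_SMAP, and this inline-asm version assumes an SMAP-aware toolchain:

```c
/* Conceptual, kernel-context-only wrapper around the CLAC instruction.
 * The actual entry code uses the ASM_CLAC assembler macro, not inline
 * asm, and patches it out on non-SMAP hardware. */
static inline void clac(void)
{
	asm volatile("clac" : : : "cc");	/* clear EFLAGS.AC */
}
```

The `entry_INT80_compat` hunk's comment explains the "early" placement: every instruction executed before the CLAC is a window in which a stray kernel dereference of a user pointer would go unnoticed instead of faulting.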
```diff
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67da1701..ef05755a1900 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 		break;
 	}
 
-	if (regno > nr_registers) {
+	if (regno >= nr_registers) {
 		WARN_ONCE(1, "decoded an instruction with an invalid register");
 		return -EINVAL;
 	}
```
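The MPX change is the classic zero-indexed bounds check: with `nr_registers` entries, valid indexes run from 0 to `nr_registers - 1`, so `>` let `regno == nr_registers` through and indexed one slot past the end of the offset table. A standalone illustration, with a hypothetical `regoff` table standing in for the kernel's:

```c
#include <stdio.h>

#define NR_REGISTERS 16			/* stand-in for nr_registers */

static const int regoff[NR_REGISTERS] = { 0 };	/* stand-in offset table */

static int get_reg_offset(int regno)
{
	/* The buggy test was "regno > NR_REGISTERS": it accepted
	 * regno == 16 and read regoff[16], one element past the end. */
	if (regno >= NR_REGISTERS)
		return -1;		/* the kernel returns -EINVAL here */
	return regoff[regno];
}

int main(void)
{
	printf("%d\n", get_reg_offset(NR_REGISTERS));	/* -1 after the fix */
	return 0;
}
```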
```diff
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2440814b0069..9cf96d82147a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
 	unsigned long virt_addr = (unsigned long)__virt_addr;
-	unsigned long phys_addr, offset;
+	phys_addr_t phys_addr;
+	unsigned long offset;
 	enum pg_level level;
 	pte_t *pte;
 
 	pte = lookup_address(virt_addr, &level);
 	BUG_ON(!pte);
 
+	/*
+	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
+	 * make 32-PAE kernel work correctly.
+	 */
 	switch (level) {
 	case PG_LEVEL_1G:
-		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PUD_PAGE_MASK;
 		break;
 	case PG_LEVEL_2M:
-		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PMD_PAGE_MASK;
 		break;
 	default:
-		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PAGE_MASK;
 	}
 
```
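The PAE problem is integer width, not pointer math: under 32-bit PAE, `unsigned long` is 32 bits while physical addresses (`phys_addr_t`) are 64 bits, so shifting an `unsigned long` PFN left by `PAGE_SHIFT` drops the high bits before the widening assignment can preserve them. A userspace demonstration of the same truncation, with fixed-width types standing in for a 32-bit PAE kernel's `unsigned long` and `phys_addr_t`:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x123456;	/* a PFN whose address exceeds 4 GiB */
	uint64_t phys_addr;

	/* Old code: the shift happens in 32-bit arithmetic, so the top
	 * bits of the physical address are gone before the assignment. */
	phys_addr = pfn << PAGE_SHIFT;
	printf("truncated: 0x%llx\n", (unsigned long long)phys_addr);

	/* Fixed code: widen the PFN first, then shift. */
	phys_addr = (uint64_t)pfn << PAGE_SHIFT;
	printf("correct:   0x%llx\n", (unsigned long long)phys_addr);
	return 0;
}
```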
```diff
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c61b6c332e97..bfadcd0f4944 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	end = (unsigned long)__end_rodata - 1;
 
 	/*
-	 * Setup a locked IMR around the physical extent of the kernel
+	 * Setup an unlocked IMR around the physical extent of the kernel
 	 * from the beginning of the .text secton to the end of the
 	 * .rodata section as one physically contiguous block.
 	 *
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 		       size / 1024, start, end);
```
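Flipping the final `lock` argument from `true` to `false` still protects the kernel's text and rodata at runtime, but leaves the IMR removable instead of sealed until SoC reset, which is what lets a kexec'd kernel proceed. A hedged sketch of the call pattern, taking `imr_add_range()`'s signature from the hunk above and assuming the driver's companion `imr_remove_range(base, size)` helper; the addresses are purely illustrative:

```c
#include <asm/imr.h>	/* assumed: imr_add_range(), imr_remove_range(), IMR_CPU */

/* Kernel-context sketch, not a userspace program: set up an IMR that
 * only the CPU may read and write, then drop it later. A locked IMR
 * (lock=true) can never be removed before reset -- the behavior that
 * broke kexec. */
static int example_imr(void)
{
	phys_addr_t base = 0x00100000;	/* illustrative region */
	size_t size = 0x00040000;
	int ret;

	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
	if (ret < 0)
		return ret;

	/* ... later, e.g. on the kexec path, the range can be dropped: */
	return imr_remove_range(base, size);
}
```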
