author		Linus Torvalds <torvalds@linux-foundation.org>	2018-11-11 17:41:50 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-11-11 17:41:50 -0500
commit		b6df7b6db1c1837fc9eada74ed561cbfe7746755 (patch)
tree		d66f19e700f9c4d5be71005097e3fc558d555d57
parent		655c6b977755594d104d2b87d3e896bd09fd34e8 (diff)
parent		15035388439f892017d38b05214d3cda6578af64 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
"A set of x86 fixes:
- Cure the LDT remapping to user space on 5 level paging which ended
up in the KASLR space
- Remove LDT mapping before freeing the LDT pages
- Make NFIT MCE handling more robust
- Unbreak the VSMP build by removing the dependency on paravirt ops
- Support broken PIT emulation on Microsoft Hyper-V
- Don't trace vmware_sched_clock() to avoid tracer recursion
- Remove -pipe from KBUILD_CFLAGS which breaks clang and is also
slower on GCC
- Trivial coding style and typo fixes"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpu/vmware: Do not trace vmware_sched_clock()
x86/vsmp: Remove dependency on pv_irq_ops
x86/ldt: Remove unused variable in map_ldt_struct()
x86/ldt: Unmap PTEs for the slot before freeing LDT pages
x86/mm: Move LDT remap out of KASLR region on 5-level paging
acpi/nfit, x86/mce: Validate a MCE's address before using it
acpi/nfit, x86/mce: Handle only uncorrectable machine checks
x86/build: Remove -pipe from KBUILD_CFLAGS
x86/hyper-v: Fix indentation in hv_do_fast_hypercall16()
Documentation/x86: Fix typo in zero-page.txt
x86/hyper-v: Enable PIT shutdown quirk
clockevents/drivers/i8253: Add support for PIT shutdown quirk
-rw-r--r--	Documentation/x86/x86_64/mm.txt	34
-rw-r--r--	Documentation/x86/zero-page.txt	2
-rw-r--r--	arch/x86/Kconfig	1
-rw-r--r--	arch/x86/Makefile	4
-rw-r--r--	arch/x86/include/asm/mce.h	2
-rw-r--r--	arch/x86/include/asm/mshyperv.h	2
-rw-r--r--	arch/x86/include/asm/page_64_types.h	12
-rw-r--r--	arch/x86/include/asm/pgtable_64_types.h	4
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	6
-rw-r--r--	arch/x86/kernel/cpu/mshyperv.c	11
-rw-r--r--	arch/x86/kernel/cpu/vmware.c	2
-rw-r--r--	arch/x86/kernel/ldt.c	59
-rw-r--r--	arch/x86/kernel/vsmp_64.c	84
-rw-r--r--	arch/x86/xen/mmu_pv.c	6
-rw-r--r--	drivers/acpi/nfit/mce.c	8
-rw-r--r--	drivers/clocksource/i8253.c	14
-rw-r--r--	include/linux/i8253.h	1

17 files changed, 114 insertions, 138 deletions
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 73aaaa3da436..804f9426ed17 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,23 +34,24 @@ __________________|____________|__________________|_________|___________________
 ____________________________________________________________|___________________________________________________________
                   |            |                  |         |
  ffff800000000000 | -128 TB    | ffff87ffffffffff | 8 TB    | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120 TB    | ffffc7ffffffffff | 64 TB   | direct mapping of all physical memory (page_offset_base)
- ffffc80000000000 | -56 TB     | ffffc8ffffffffff | 1 TB    | ... unused hole
+ ffff880000000000 | -120 TB    | ffff887fffffffff | 0.5 TB  | LDT remap for PTI
+ ffff888000000000 | -119.5 TB  | ffffc87fffffffff | 64 TB   | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 | -55.5 TB   | ffffc8ffffffffff | 0.5 TB  | ... unused hole
  ffffc90000000000 | -55 TB     | ffffe8ffffffffff | 32 TB   | vmalloc/ioremap space (vmalloc_base)
  ffffe90000000000 | -23 TB     | ffffe9ffffffffff | 1 TB    | ... unused hole
  ffffea0000000000 | -22 TB     | ffffeaffffffffff | 1 TB    | virtual memory map (vmemmap_base)
  ffffeb0000000000 | -21 TB     | ffffebffffffffff | 1 TB    | ... unused hole
  ffffec0000000000 | -20 TB     | fffffbffffffffff | 16 TB   | KASAN shadow memory
- fffffc0000000000 | -4 TB      | fffffdffffffffff | 2 TB    | ... unused hole
-                  |            |                  |         |    vaddr_end for KASLR
- fffffe0000000000 | -2 TB      | fffffe7fffffffff | 0.5 TB  | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB    | fffffeffffffffff | 0.5 TB  | LDT remap for PTI
- ffffff0000000000 | -1 TB      | ffffff7fffffffff | 0.5 TB  | %esp fixup stacks
 __________________|____________|__________________|_________|____________________________________________________________
                                                              |
-                                                             | Identical layout to the 47-bit one from here on:
+                                                             | Identical layout to the 56-bit one from here on:
 ____________________________________________________________|____________________________________________________________
                   |            |                  |         |
+ fffffc0000000000 | -4 TB      | fffffdffffffffff | 2 TB    | ... unused hole
+                  |            |                  |         |    vaddr_end for KASLR
+ fffffe0000000000 | -2 TB      | fffffe7fffffffff | 0.5 TB  | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB    | fffffeffffffffff | 0.5 TB  | ... unused hole
+ ffffff0000000000 | -1 TB      | ffffff7fffffffff | 0.5 TB  | %esp fixup stacks
  ffffff8000000000 | -512 GB    | ffffffeeffffffff | 444 GB  | ... unused hole
  ffffffef00000000 | -68 GB     | fffffffeffffffff | 64 GB   | EFI region mapping space
  ffffffff00000000 | -4 GB      | ffffffff7fffffff | 2 GB    | ... unused hole
@@ -83,7 +84,7 @@ Notes:
 __________________|____________|__________________|_________|___________________________________________________________
                   |            |                  |         |
  0000800000000000 | +64 PB     | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
-                  |            |                  |         |     virtual memory addresses up to the -128 TB
+                  |            |                  |         |     virtual memory addresses up to the -64 PB
                   |            |                  |         |     starting offset of kernel mappings.
 __________________|____________|__________________|_________|___________________________________________________________
                                                              |
@@ -91,23 +92,24 @@ __________________|____________|__________________|_________|___________________
 ____________________________________________________________|___________________________________________________________
                   |            |                  |         |
  ff00000000000000 | -64 PB     | ff0fffffffffffff | 4 PB    | ... guard hole, also reserved for hypervisor
- ff10000000000000 | -60 PB     | ff8fffffffffffff | 32 PB   | direct mapping of all physical memory (page_offset_base)
- ff90000000000000 | -28 PB     | ff9fffffffffffff | 4 PB    | LDT remap for PTI
+ ff10000000000000 | -60 PB     | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 | -59.75 PB  | ff90ffffffffffff | 32 PB   | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 | -27.75 PB  | ff9fffffffffffff | 3.75 PB | ... unused hole
  ffa0000000000000 | -24 PB     | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
  ffd2000000000000 | -11.5 PB   | ffd3ffffffffffff | 0.5 PB  | ... unused hole
  ffd4000000000000 | -11 PB     | ffd5ffffffffffff | 0.5 PB  | virtual memory map (vmemmap_base)
  ffd6000000000000 | -10.5 PB   | ffdeffffffffffff | 2.25 PB | ... unused hole
  ffdf000000000000 | -8.25 PB   | fffffdffffffffff | ~8 PB   | KASAN shadow memory
- fffffc0000000000 | -4 TB      | fffffdffffffffff | 2 TB    | ... unused hole
-                  |            |                  |         |    vaddr_end for KASLR
- fffffe0000000000 | -2 TB      | fffffe7fffffffff | 0.5 TB  | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB    | fffffeffffffffff | 0.5 TB  | ... unused hole
- ffffff0000000000 | -1 TB      | ffffff7fffffffff | 0.5 TB  | %esp fixup stacks
 __________________|____________|__________________|_________|____________________________________________________________
                                                              |
                                                              | Identical layout to the 47-bit one from here on:
 ____________________________________________________________|____________________________________________________________
                   |            |                  |         |
+ fffffc0000000000 | -4 TB      | fffffdffffffffff | 2 TB    | ... unused hole
+                  |            |                  |         |    vaddr_end for KASLR
+ fffffe0000000000 | -2 TB      | fffffe7fffffffff | 0.5 TB  | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB    | fffffeffffffffff | 0.5 TB  | ... unused hole
+ ffffff0000000000 | -1 TB      | ffffff7fffffffff | 0.5 TB  | %esp fixup stacks
  ffffff8000000000 | -512 GB    | ffffffeeffffffff | 444 GB  | ... unused hole
  ffffffef00000000 | -68 GB     | fffffffeffffffff | 64 GB   | EFI region mapping space
  ffffffff00000000 | -4 GB      | ffffffff7fffffff | 2 GB    | ... unused hole
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 97b7adbceda4..68aed077f7b6 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -25,7 +25,7 @@ Offset	Proto	Name		Meaning
 0C8/004	ALL	ext_cmd_line_ptr  cmd_line_ptr high 32bits
 140/080	ALL	edid_info	Video mode setup (struct edid_info)
 1C0/020	ALL	efi_info	EFI 32 information (struct efi_info)
-1E0/004	ALL	alk_mem_k	Alternative mem check, in KB
+1E0/004	ALL	alt_mem_k	Alternative mem check, in KB
 1E4/004	ALL	scratch		Scratch field for the kernel setup code
 1E8/001	ALL	e820_entries	Number of entries in e820_table (below)
 1E9/001	ALL	eddbuf_entries	Number of entries in eddbuf (below)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba7e3464ee92..9d734f3c8234 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -525,7 +525,6 @@ config X86_VSMP
 	bool "ScaleMP vSMP"
 	select HYPERVISOR_GUEST
 	select PARAVIRT
-	select PARAVIRT_XXL
 	depends on X86_64 && PCI
 	depends on X86_EXTENDED_PLATFORM
 	depends on SMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5b562e464009..88398fdf8129 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -213,8 +213,6 @@ ifdef CONFIG_X86_64
         KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
 endif
 
-# Speed up the build
-KBUILD_CFLAGS += -pipe
 # Workaround for a gcc prelease that unfortunately was shipped in a suse release
 KBUILD_CFLAGS += -Wno-sign-compare
 #
@@ -239,7 +237,7 @@ archheaders:
 archmacros:
 	$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
 
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
 export ASM_MACRO_FLAGS
 KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4da9b1c58d28..c1a812bd5a27 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d6271cce198..1d0a7778e163 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
 			     : "cc");
 	}
 #endif
-		return hv_status;
+	return hv_status;
 }
 
 /*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index cd0cf1c568b4..8f657286d599 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -33,12 +33,14 @@
 
 /*
  * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
+ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
  */
-#define __PAGE_OFFSET_BASE_L5	_AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4	_AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5	_AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4	_AC(0xffff888000000000, UL)
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 #define __PAGE_OFFSET		page_offset_base
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 04edd2d58211..84bd9bdc1987 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM			(1UL << MAX_PHYSMEM_BITS)
 
-#define LDT_PGD_ENTRY_L4	-3UL
-#define LDT_PGD_ENTRY_L5	-112UL
-#define LDT_PGD_ENTRY		(pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY		-240UL
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c66d2fc8f81..36d2696c9563 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
  * be somewhat complicated (e.g. segment offset would require an instruction
  * parser). So only support physical addresses up to page granuality for now.
  */
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
 {
 	if (!(m->status & MCI_STATUS_ADDRV))
 		return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(mce_usable_address);
 
 bool mce_is_memory_error(struct mce *m)
 {
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
 }
 EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
 {
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(mce_is_correctable);
 
 static bool cec_add_mce(struct mce *m)
 {
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 1c72f3819eb1..e81a2db42df7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kexec.h>
+#include <linux/i8253.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
 	if (efi_enabled(EFI_BOOT))
 		x86_platform.get_nmi_reason = hv_get_nmi_reason;
 
+	/*
+	 * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+	 * counter register during PIT shutdown restarts the PIT. So it
+	 * continues to interrupt @18.2 HZ. Setting i8253_clear_counter
+	 * to false tells pit_shutdown() not to zero the counter so that
+	 * the PIT really is shutdown. Generation 2 VMs don't have a PIT,
+	 * and setting this value has no effect.
+	 */
+	i8253_clear_counter_on_shutdown = false;
+
 #if IS_ENABLED(CONFIG_HYPERV)
 	/*
 	 * Setup the hook to get control post apic initialization.
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d9ab49bed8af..0eda91f8eeac 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
 }
 early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
 
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
 {
 	unsigned long long ns;
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ab18e0884dc6..6135ae8ce036 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
- *
- * There is no corresponding unmap function. Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
  */
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	unsigned long va;
 	bool is_vmalloc;
 	spinlock_t *ptl;
-	pgd_t *pgd;
-	int i;
+	int i, nr_pages;
 
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	/* Check if the current mappings are sane */
 	sanity_check_ldt_mapping(mm);
 
-	/*
-	 * Did we already have the top level entry allocated? We can't
-	 * use pgd_none() for this because it doens't do anything on
-	 * 4-level page table kernels.
-	 */
-	pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
 	is_vmalloc = is_vmalloc_addr(ldt->entries);
 
-	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
 		unsigned long offset = i << PAGE_SHIFT;
 		const void *src = (char *)ldt->entries + offset;
 		unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	/* Propagate LDT mapping to the user page-table */
 	map_ldt_struct_to_user(mm);
 
-	va = (unsigned long)ldt_slot_va(slot);
-	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
 	ldt->slot = slot;
 	return 0;
 }
 
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+	unsigned long va;
+	int i, nr_pages;
+
+	if (!ldt)
+		return;
+
+	/* LDT map/unmap is only required for PTI */
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
+		unsigned long offset = i << PAGE_SHIFT;
+		spinlock_t *ptl;
+		pte_t *ptep;
+
+		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+		ptep = get_locked_pte(mm, va, &ptl);
+		pte_clear(mm, va, ptep);
+		pte_unmap_unlock(ptep, ptl);
+	}
+
+	va = (unsigned long)ldt_slot_va(ldt->slot);
+	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
 #else /* !CONFIG_PAGE_TABLE_ISOLATION */
 
 static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 {
 	return 0;
 }
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
 #endif /* CONFIG_PAGE_TABLE_ISOLATION */
 
 static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	}
 
 	install_ldt(mm, new_ldt);
+	unmap_ldt_struct(mm, old_ldt);
 	free_ldt_struct(old_ldt);
 	error = 0;
 
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 1eae5af491c2..891a75dbc131 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,65 +26,8 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
-	unsigned long flags = native_save_fl();
-
-	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
-		flags &= ~X86_EFLAGS_IF;
-	return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
-	if (flags & X86_EFLAGS_IF)
-		flags &= ~X86_EFLAGS_AC;
-	else
-		flags |= X86_EFLAGS_AC;
-	native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
-	unsigned long flags = native_save_fl();
-
-	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
-	unsigned long flags = native_save_fl();
-
-	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
-				  unsigned long addr, unsigned len)
-{
-	switch (type) {
-	case PARAVIRT_PATCH(irq.irq_enable):
-	case PARAVIRT_PATCH(irq.irq_disable):
-	case PARAVIRT_PATCH(irq.save_fl):
-	case PARAVIRT_PATCH(irq.restore_fl):
-		return paravirt_patch_default(type, ibuf, addr, len);
-	default:
-		return native_patch(type, ibuf, addr, len);
-	}
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
 {
 	void __iomem *address;
 	unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
 	}
 #endif
 
-	if (cap & ctl & (1 << 4)) {
-		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
-		pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
-		pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
-		pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
-		pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
-		pv_ops.init.patch = vsmp_patch;
-		ctl &= ~(1 << 4);
-	}
 	writel(ctl, address + 4);
 	ctl = readl(address + 4);
 	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
 
 	early_iounmap(address, 8);
 }
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
 static int is_vsmp = -1;
 
 static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
 {
 	return 0;
 }
+static void __init set_vsmp_ctl(void)
+{
+}
 #endif
 
 static void __init vsmp_cap_cpus(void)
 {
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
 	void __iomem *address;
 	unsigned int cfg, topology, node_shift, maxcpus;
 
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
 
 	vsmp_cap_cpus();
 
-	set_vsmp_pv_ops();
+	set_vsmp_ctl();
 	return;
 }
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 0d7b3ae4960b..a5d7ed125337 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	init_top_pgt[0] = __pgd(0);
 
 	/* Pre-constructed entries are in pfn, so convert to mfn */
-	/* L4[272] -> level3_ident_pgt */
+	/* L4[273] -> level3_ident_pgt */
 	/* L4[511] -> level3_kernel_pgt */
 	convert_pfn_mfn(init_top_pgt);
 
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	addr[0] = (unsigned long)pgd;
 	addr[1] = (unsigned long)l3;
 	addr[2] = (unsigned long)l2;
-	/* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-	 * Both L4[272][0] and L4[511][510] have entries that point to the same
+	/* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
+	 * Both L4[273][0] and L4[511][510] have entries that point to the same
 	 * L2 (PMD) tables. Meaning that if you modify it in __va space
 	 * it will be also modified in the __ka space! (But if you just
 	 * modify the PMD table to point to other PTE's or none, then you
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e9626bf6ca29..d6c1b10f6c25 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
 	struct acpi_nfit_desc *acpi_desc;
 	struct nfit_spa *nfit_spa;
 
-	/* We only care about memory errors */
-	if (!mce_is_memory_error(mce))
+	/* We only care about uncorrectable memory errors */
+	if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+		return NOTIFY_DONE;
+
+	/* Verify the address reported in the MCE is valid. */
+	if (!mce_usable_address(mce))
 		return NOTIFY_DONE;
 
 	/*
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 9c38895542f4..d4350bb10b83 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,6 +20,13 @@
 DEFINE_RAW_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
 #ifdef CONFIG_CLKSRC_I8253
 /*
  * Since the PIT overflows every tick, its not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
 	raw_spin_lock(&i8253_lock);
 
 	outb_p(0x30, PIT_MODE);
-	outb_p(0, PIT_CH0);
-	outb_p(0, PIT_CH0);
+
+	if (i8253_clear_counter_on_shutdown) {
+		outb_p(0, PIT_CH0);
+		outb_p(0, PIT_CH0);
+	}
 
 	raw_spin_unlock(&i8253_lock);
 	return 0;
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
 #define PIT_LATCH	((PIT_TICK_RATE + HZ/2) / HZ)
 
 extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
 extern struct clock_event_device i8253_clockevent;
 extern void clockevent_i8253_init(bool oneshot);
 
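Usage note on the PIT shutdown quirk above: i8253_clear_counter_on_shutdown defaults to true, so every existing platform keeps the old pit_shutdown() behaviour, and only platform setup code that knows its PIT emulation restarts on a counter write clears it, as ms_hyperv_init_platform() does in this series. The sketch below shows how other platform early-init code could opt out the same way; the quirky_platform_early_init() function name is hypothetical, while <linux/i8253.h> and the flag are the interfaces added by this patch.

#include <linux/i8253.h>

/*
 * Hypothetical early-init hook for a platform whose emulated PIT restarts
 * when the counter register is zeroed during shutdown. Must run before
 * clockevents shut the PIT down.
 */
static void __init quirky_platform_early_init(void)
{
	/*
	 * Tell pit_shutdown() to skip the two counter writes so the PIT
	 * actually stays stopped on this platform.
	 */
	i8253_clear_counter_on_shutdown = false;
}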
