Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c    6
-rw-r--r--   arch/x86/kernel/cpu/mshyperv.c     11
-rw-r--r--   arch/x86/kernel/cpu/vmware.c        2
-rw-r--r--   arch/x86/kernel/ldt.c              59
-rw-r--r--   arch/x86/kernel/vsmp_64.c          84
5 files changed, 61 insertions, 101 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c66d2fc8f81..36d2696c9563 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
  * be somewhat complicated (e.g. segment offset would require an instruction
  * parser). So only support physical addresses up to page granuality for now.
  */
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
 {
 	if (!(m->status & MCI_STATUS_ADDRV))
 		return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(mce_usable_address);
 
 bool mce_is_memory_error(struct mce *m)
 {
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
 }
 EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
 {
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(mce_is_correctable);
 
 static bool cec_add_mce(struct mce *m)
 {
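Taken together, the mce.c hunks drop the static qualifiers and export mce_usable_address() and mce_is_correctable() for use outside the core MCE code. As a hypothetical illustration (the module name, notifier body, and log message below are invented; only the exported helpers and the existing mce_register_decode_chain() interface are real), a module could now filter machine checks like this:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <asm/mce.h>

/* Hypothetical module-side notifier using the newly exported helpers. */
static int example_mce_notify(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct mce *m = data;

	/* Act only on correctable errors that carry a usable address. */
	if (!m || !mce_is_correctable(m) || !mce_usable_address(m))
		return NOTIFY_DONE;

	pr_info("correctable MCE at PFN 0x%llx\n", m->addr >> PAGE_SHIFT);
	return NOTIFY_OK;
}

static struct notifier_block example_mce_nb = {
	.notifier_call	= example_mce_notify,
	.priority	= MCE_PRIO_LOWEST,
};

static int __init example_init(void)
{
	mce_register_decode_chain(&example_mce_nb);
	return 0;
}

static void __exit example_exit(void)
{
	mce_unregister_decode_chain(&example_mce_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");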
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 1c72f3819eb1..e81a2db42df7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kexec.h>
+#include <linux/i8253.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
 	if (efi_enabled(EFI_BOOT))
 		x86_platform.get_nmi_reason = hv_get_nmi_reason;
 
+	/*
+	 * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+	 * counter register during PIT shutdown restarts the PIT. So it
+	 * continues to interrupt @18.2 HZ. Setting i8253_clear_counter
+	 * to false tells pit_shutdown() not to zero the counter so that
+	 * the PIT really is shutdown. Generation 2 VMs don't have a PIT,
+	 * and setting this value has no effect.
+	 */
+	i8253_clear_counter_on_shutdown = false;
+
 #if IS_ENABLED(CONFIG_HYPERV)
 	/*
 	 * Setup the hook to get control post apic initialization.
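For context, the new flag is consumed by the i8253 clockevent driver. A sketch of pit_shutdown() in drivers/clocksource/i8253.c as modified by the companion patch in this series (paraphrased, not guaranteed verbatim):

static int pit_shutdown(struct clock_event_device *evt)
{
	if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
		return 0;

	raw_spin_lock(&i8253_lock);

	outb_p(0x30, PIT_MODE);

	/*
	 * With i8253_clear_counter_on_shutdown false (as Hyper-V now
	 * sets it), the counter register is left untouched, so the
	 * emulated PIT stays stopped instead of restarting at 18.2 HZ.
	 */
	if (i8253_clear_counter_on_shutdown) {
		outb_p(0, PIT_CH0);
		outb_p(0, PIT_CH0);
	}

	raw_spin_unlock(&i8253_lock);
	return 0;
}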
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d9ab49bed8af..0eda91f8eeac 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
 }
 early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
 
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
 {
 	unsigned long long ns;
 
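The notrace annotation matters because ftrace reads timestamps through sched_clock(), which on VMware guests resolves to vmware_sched_clock(); if that function were itself instrumented, every trace event would recurse back into the tracer. A comment-only sketch of the hazard, plus what notrace roughly expands to (the exact attribute spelling varies by kernel config):

/*
 * Hypothetical call chain without notrace (names illustrative only):
 *
 *   trace event
 *     -> trace_clock_local()
 *     -> sched_clock()          (resolves to vmware_sched_clock())
 *     -> ftrace entry hook      (because the function is traced)
 *     -> trace event again ...  (recursion)
 *
 * notrace excludes a function from ftrace instrumentation; in the
 * kernel it expands roughly to:
 */
#define notrace __attribute__((no_instrument_function))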
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ab18e0884dc6..6135ae8ce036 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
- *
- * There is no corresponding unmap function. Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
  */
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	unsigned long va;
 	bool is_vmalloc;
 	spinlock_t *ptl;
-	pgd_t *pgd;
-	int i;
+	int i, nr_pages;
 
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	/* Check if the current mappings are sane */
 	sanity_check_ldt_mapping(mm);
 
-	/*
-	 * Did we already have the top level entry allocated? We can't
-	 * use pgd_none() for this because it doens't do anything on
-	 * 4-level page table kernels.
-	 */
-	pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
 	is_vmalloc = is_vmalloc_addr(ldt->entries);
 
-	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
 		unsigned long offset = i << PAGE_SHIFT;
 		const void *src = (char *)ldt->entries + offset;
 		unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	/* Propagate LDT mapping to the user page-table */
 	map_ldt_struct_to_user(mm);
 
-	va = (unsigned long)ldt_slot_va(slot);
-	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
 	ldt->slot = slot;
 	return 0;
 }
 
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+	unsigned long va;
+	int i, nr_pages;
+
+	if (!ldt)
+		return;
+
+	/* LDT map/unmap is only required for PTI */
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+	for (i = 0; i < nr_pages; i++) {
+		unsigned long offset = i << PAGE_SHIFT;
+		spinlock_t *ptl;
+		pte_t *ptep;
+
+		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+		ptep = get_locked_pte(mm, va, &ptl);
+		pte_clear(mm, va, ptep);
+		pte_unmap_unlock(ptep, ptl);
+	}
+
+	va = (unsigned long)ldt_slot_va(ldt->slot);
+	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
 #else /* !CONFIG_PAGE_TABLE_ISOLATION */
 
 static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 {
 	return 0;
 }
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
 #endif /* CONFIG_PAGE_TABLE_ISOLATION */
 
 static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	}
 
 	install_ldt(mm, new_ldt);
+	unmap_ldt_struct(mm, old_ldt);
 	free_ldt_struct(old_ldt);
 	error = 0;
 
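Both map_ldt_struct() and the new unmap_ldt_struct() now size their loops with the same DIV_ROUND_UP() expression, replacing the old open-coded loop bound. A standalone sketch (not kernel code) with a few worked values, assuming LDT_ENTRY_SIZE of 8 bytes and 4 KiB pages:

#define LDT_ENTRY_SIZE	8
#define PAGE_SIZE	4096
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static inline int ldt_nr_pages(int nr_entries)
{
	return DIV_ROUND_UP(nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
}

/*
 *   ldt_nr_pages(1)    == 1   (8 bytes still occupies one page)
 *   ldt_nr_pages(512)  == 1   (exactly one page)
 *   ldt_nr_pages(513)  == 2
 *   ldt_nr_pages(8192) == 16  (the architectural maximum, 64 KiB)
 */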
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 1eae5af491c2..891a75dbc131 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,65 +26,8 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
-	unsigned long flags = native_save_fl();
-
-	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
-		flags &= ~X86_EFLAGS_IF;
-	return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
-	if (flags & X86_EFLAGS_IF)
-		flags &= ~X86_EFLAGS_AC;
-	else
-		flags |= X86_EFLAGS_AC;
-	native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
-	unsigned long flags = native_save_fl();
-
-	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
-	unsigned long flags = native_save_fl();
-
-	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
-				  unsigned long addr, unsigned len)
-{
-	switch (type) {
-	case PARAVIRT_PATCH(irq.irq_enable):
-	case PARAVIRT_PATCH(irq.irq_disable):
-	case PARAVIRT_PATCH(irq.save_fl):
-	case PARAVIRT_PATCH(irq.restore_fl):
-		return paravirt_patch_default(type, ibuf, addr, len);
-	default:
-		return native_patch(type, ibuf, addr, len);
-	}
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
 {
 	void __iomem *address;
 	unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
 	}
 #endif
 
-	if (cap & ctl & (1 << 4)) {
-		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
-		pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
-		pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
-		pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
-		pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
-		pv_ops.init.patch = vsmp_patch;
-		ctl &= ~(1 << 4);
-	}
 	writel(ctl, address + 4);
 	ctl = readl(address + 4);
 	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
 
 	early_iounmap(address, 8);
 }
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
 static int is_vsmp = -1;
 
 static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
 {
 	return 0;
 }
+static void __init set_vsmp_ctl(void)
+{
+}
 #endif
 
 static void __init vsmp_cap_cpus(void)
 {
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
 	void __iomem *address;
 	unsigned int cfg, topology, node_shift, maxcpus;
 
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
 
 	vsmp_cap_cpus();
 
-	set_vsmp_pv_ops();
+	set_vsmp_ctl();
 	return;
 }
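For readers decoding the deleted fastpath: on vSMPowered systems, EFLAGS.AC served as an inverted shadow of EFLAGS.IF, as the removed comment notes. A one-line sketch restating the deleted vsmp_save_fl() logic (an illustrative helper, not part of the patch):

/* Interrupts counted as enabled only when IF was set and AC was clear. */
static inline bool vsmp_irqs_enabled(unsigned long flags)
{
	return (flags & X86_EFLAGS_IF) && !(flags & X86_EFLAGS_AC);
}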