author     Juergen Gross <jgross@suse.com>          2017-11-24 03:42:21 -0500
committer  Thomas Gleixner <tglx@linutronix.de>     2017-11-27 18:28:56 -0500
commit     42b3a4cb5609de757f5445fcad18945ba9239a07 (patch)
tree       84c2436f6e3c08531701fea29d9667cd6bdab520
parent     9d0b62328d34c7044114d4f4281981d4c537c4ba (diff)
x86/xen: Support early interrupts in xen pv guests
Add the early interrupt handlers activated by idt_setup_early_handler() to
the set of handlers supported by Xen pv guests. This allows an early WARN()
to be handled instead of crashing the guest.
Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: boris.ostrovsky@oracle.com
Link: https://lkml.kernel.org/r/20171124084221.30172-1-jgross@suse.com
-rw-r--r--  arch/x86/include/asm/segment.h | 12
-rw-r--r--  arch/x86/mm/extable.c          |  4
-rw-r--r--  arch/x86/xen/enlighten_pv.c    | 37
-rw-r--r--  arch/x86/xen/xen-asm_64.S      | 14
4 files changed, 53 insertions, 14 deletions
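For orientation before the patch body (this note and the sketch are illustrative, not part of the patch): when the Xen hypervisor delivers an exception to a 64-bit pv guest it leaves %rcx and %r11 on the stack, so each early handler needs a short prequel that pops them before jumping to the generic stub; the three instructions (pop %rcx, pop %r11, jmp rel32) encode to at most 1 + 2 + 5 = 8 bytes, which is where the XEN_EARLY_IDT_HANDLER_SIZE stride comes from. The patch wires this up by letting get_trap_addr() translate any IDT entry that points into early_idt_handler_array to the matching Xen stub. Below is a minimal, self-contained C sketch of that translation using the constants from the patch; the dummy array definitions and the example vector are assumptions for illustration only.

/*
 * Illustrative sketch only, not kernel code: it mirrors the translation the
 * patch adds to get_trap_addr(). The arrays are defined in assembly in the
 * kernel; dummy definitions keep this sketch self-contained.
 */
#include <stddef.h>

#define NUM_EXCEPTION_VECTORS           32      /* x86 exception vectors 0-31 */
#define EARLY_IDT_HANDLER_SIZE           9      /* stride of the generic early stubs */
#define XEN_EARLY_IDT_HANDLER_SIZE       8      /* stride of the Xen prequel stubs */

static const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
static const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];

/*
 * If an address points into early_idt_handler_array (as every early IDT
 * entry does right after idt_setup_early_handler()), recover the vector
 * number from the offset and return the matching Xen stub instead.
 */
static const void *xen_redirect_early_handler(const void *addr)
{
        if (addr >= (const void *)early_idt_handler_array[0] &&
            addr <  (const void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
                size_t nr = (size_t)((const char *)addr -
                                     early_idt_handler_array[0]) /
                            EARLY_IDT_HANDLER_SIZE;

                /* e.g. vector 14 (#PF): generic stub 14 maps to Xen stub 14 */
                return xen_early_idt_handler_array[nr];
        }

        return addr;    /* any other handler address is left unchanged */
}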
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index b20f9d623f9c..8f09012b92e7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -236,11 +236,23 @@
  */
 #define EARLY_IDT_HANDLER_SIZE 9
 
+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
 /*
  * Load a segment. Fall back on loading the zero segment if something goes
  * wrong. This variant assumes that loading zero fully clears the segment.
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 3321b446b66c..88754bfd425f 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,6 +1,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/traps.h>
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
         * Old CPUs leave the high bits of CS on the stack
         * undefined. I'm not sure which CPUs do this, but at least
         * the 486 DX works this way.
+        * Xen pv domains are not using the default __KERNEL_CS.
         */
-       if (regs->cs != __KERNEL_CS)
+       if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
                goto fail;
 
        /*
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5b2b3f3f6531..f2414c6c5e7c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
        { simd_coprocessor_error, xen_simd_coprocessor_error, false },
 };
 
-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
                }
        }
 
+       if (nr == ARRAY_SIZE(trap_array) &&
+           *addr >= (void *)early_idt_handler_array[0] &&
+           *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+               nr = (*addr - (void *)early_idt_handler_array[0]) /
+                    EARLY_IDT_HANDLER_SIZE;
+               *addr = (void *)xen_early_idt_handler_array[nr];
+       }
+
        if (WARN_ON(ist != 0 && !ist_okay))
                return false;
 
@@ -1262,6 +1270,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_setup_gdt(0);
 
        xen_init_irq_ops();
+
+       /* Let's presume PV guests always boot on vCPU with id 0. */
+       per_cpu(xen_vcpu_id, 0) = 0;
+
+       /*
+        * Setup xen_vcpu early because idt_setup_early_handler needs it for
+        * local_irq_disable(), irqs_disabled().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
+
+       idt_setup_early_handler();
+
        xen_init_capabilities();
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1318,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
         */
        acpi_numa = -1;
 #endif
-       /* Let's presume PV guests always boot on vCPU with id 0. */
-       per_cpu(xen_vcpu_id, 0) = 0;
-
-       /*
-        * Setup xen_vcpu early because start_kernel needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
        local_irq_disable();
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b5..417b339e5c8e 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -15,6 +15,7 @@
 
 #include <xen/interface/xen.h>
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
 #endif
 xen_pv_trap hypervisor_callback
 
+__INIT
+ENTRY(xen_early_idt_handler_array)
+       i = 0
+       .rept NUM_EXCEPTION_VECTORS
+       pop %rcx
+       pop %r11
+       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+       i = i + 1
+       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+       .endr
+END(xen_early_idt_handler_array)
+__FINIT
+
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame: