diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-08 18:21:48 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-08 18:21:48 -0500 |
commit | 650e5455d83dafb465b478000507468152d3c523 (patch) | |
tree | 0b71450dce5293056863acb845af4497ef52415d | |
parent | de030179584833ddac77ab847d7083199e30a877 (diff) | |
parent | dd7a5ab495019d424c2b0747892eb2e38a052ba5 (diff) |
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
"A handful of x86 fixes:
- a syscall ABI fix, fixing an Android breakage
- a Xen PV guest fix relating to the RTC device, causing a
non-working console
- a Xen guest syscall stack frame fix
- an MCE hotplug CPU crash fix"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/numachip: Fix NumaConnect2 MMCFG PCI access
x86/entry: Restore traditional SYSENTER calling convention
x86/entry: Fix some comments
x86/paravirt: Prevent rtc_cmos platform device init on PV guests
x86/xen: Avoid fast syscall path for Xen PV guests
x86/mce: Ensure offline CPUs don't participate in rendezvous process
-rw-r--r-- | arch/x86/entry/common.c | 6 | ||||
-rw-r--r-- | arch/x86/entry/entry_32.S | 7 | ||||
-rw-r--r-- | arch/x86/entry/entry_64_compat.S | 20 | ||||
-rw-r--r-- | arch/x86/entry/vdso/vdso32/system_call.S | 54 | ||||
-rw-r--r-- | arch/x86/include/asm/cpufeature.h | 1 | ||||
-rw-r--r-- | arch/x86/include/asm/paravirt.h | 6 | ||||
-rw-r--r-- | arch/x86/include/asm/paravirt_types.h | 5 | ||||
-rw-r--r-- | arch/x86/include/asm/processor.h | 1 | ||||
-rw-r--r-- | arch/x86/kernel/apic/apic_numachip.c | 5 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 11 | ||||
-rw-r--r-- | arch/x86/kernel/rtc.c | 3 | ||||
-rw-r--r-- | arch/x86/lguest/boot.c | 1 | ||||
-rw-r--r-- | arch/x86/xen/enlighten.c | 8 |
13 files changed, 96 insertions, 32 deletions
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index a89fdbc1f0be..03663740c866 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c | |||
@@ -421,7 +421,7 @@ __visible long do_fast_syscall_32(struct pt_regs *regs) | |||
421 | regs->ip = landing_pad; | 421 | regs->ip = landing_pad; |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * Fetch ECX from where the vDSO stashed it. | 424 | * Fetch EBP from where the vDSO stashed it. |
425 | * | 425 | * |
426 | * WARNING: We are in CONTEXT_USER and RCU isn't paying attention! | 426 | * WARNING: We are in CONTEXT_USER and RCU isn't paying attention! |
427 | */ | 427 | */ |
@@ -432,10 +432,10 @@ __visible long do_fast_syscall_32(struct pt_regs *regs) | |||
432 | * Micro-optimization: the pointer we're following is explicitly | 432 | * Micro-optimization: the pointer we're following is explicitly |
433 | * 32 bits, so it can't be out of range. | 433 | * 32 bits, so it can't be out of range. |
434 | */ | 434 | */ |
435 | __get_user(*(u32 *)®s->cx, | 435 | __get_user(*(u32 *)®s->bp, |
436 | (u32 __user __force *)(unsigned long)(u32)regs->sp) | 436 | (u32 __user __force *)(unsigned long)(u32)regs->sp) |
437 | #else | 437 | #else |
438 | get_user(*(u32 *)®s->cx, | 438 | get_user(*(u32 *)®s->bp, |
439 | (u32 __user __force *)(unsigned long)(u32)regs->sp) | 439 | (u32 __user __force *)(unsigned long)(u32)regs->sp) |
440 | #endif | 440 | #endif |
441 | ) { | 441 | ) { |
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 3eb572ed3d7a..f3b6d54e0042 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -292,7 +292,7 @@ ENTRY(entry_SYSENTER_32) | |||
292 | movl TSS_sysenter_sp0(%esp), %esp | 292 | movl TSS_sysenter_sp0(%esp), %esp |
293 | sysenter_past_esp: | 293 | sysenter_past_esp: |
294 | pushl $__USER_DS /* pt_regs->ss */ | 294 | pushl $__USER_DS /* pt_regs->ss */ |
295 | pushl %ecx /* pt_regs->cx */ | 295 | pushl %ebp /* pt_regs->sp (stashed in bp) */ |
296 | pushfl /* pt_regs->flags (except IF = 0) */ | 296 | pushfl /* pt_regs->flags (except IF = 0) */ |
297 | orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ | 297 | orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ |
298 | pushl $__USER_CS /* pt_regs->cs */ | 298 | pushl $__USER_CS /* pt_regs->cs */ |
@@ -308,8 +308,9 @@ sysenter_past_esp: | |||
308 | 308 | ||
309 | movl %esp, %eax | 309 | movl %esp, %eax |
310 | call do_fast_syscall_32 | 310 | call do_fast_syscall_32 |
311 | testl %eax, %eax | 311 | /* XEN PV guests always use IRET path */ |
312 | jz .Lsyscall_32_done | 312 | ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ |
313 | "jmp .Lsyscall_32_done", X86_FEATURE_XENPV | ||
313 | 314 | ||
314 | /* Opportunistic SYSEXIT */ | 315 | /* Opportunistic SYSEXIT */ |
315 | TRACE_IRQS_ON /* User mode traces as IRQs on. */ | 316 | TRACE_IRQS_ON /* User mode traces as IRQs on. */ |
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index c3201830a85e..6a1ae3751e82 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S | |||
@@ -63,7 +63,7 @@ ENTRY(entry_SYSENTER_compat) | |||
63 | 63 | ||
64 | /* Construct struct pt_regs on stack */ | 64 | /* Construct struct pt_regs on stack */ |
65 | pushq $__USER32_DS /* pt_regs->ss */ | 65 | pushq $__USER32_DS /* pt_regs->ss */ |
66 | pushq %rcx /* pt_regs->sp */ | 66 | pushq %rbp /* pt_regs->sp (stashed in bp) */ |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * Push flags. This is nasty. First, interrupts are currently | 69 | * Push flags. This is nasty. First, interrupts are currently |
@@ -82,14 +82,14 @@ ENTRY(entry_SYSENTER_compat) | |||
82 | pushq %rdi /* pt_regs->di */ | 82 | pushq %rdi /* pt_regs->di */ |
83 | pushq %rsi /* pt_regs->si */ | 83 | pushq %rsi /* pt_regs->si */ |
84 | pushq %rdx /* pt_regs->dx */ | 84 | pushq %rdx /* pt_regs->dx */ |
85 | pushq %rcx /* pt_regs->cx (will be overwritten) */ | 85 | pushq %rcx /* pt_regs->cx */ |
86 | pushq $-ENOSYS /* pt_regs->ax */ | 86 | pushq $-ENOSYS /* pt_regs->ax */ |
87 | pushq %r8 /* pt_regs->r8 = 0 */ | 87 | pushq %r8 /* pt_regs->r8 = 0 */ |
88 | pushq %r8 /* pt_regs->r9 = 0 */ | 88 | pushq %r8 /* pt_regs->r9 = 0 */ |
89 | pushq %r8 /* pt_regs->r10 = 0 */ | 89 | pushq %r8 /* pt_regs->r10 = 0 */ |
90 | pushq %r8 /* pt_regs->r11 = 0 */ | 90 | pushq %r8 /* pt_regs->r11 = 0 */ |
91 | pushq %rbx /* pt_regs->rbx */ | 91 | pushq %rbx /* pt_regs->rbx */ |
92 | pushq %rbp /* pt_regs->rbp */ | 92 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
93 | pushq %r8 /* pt_regs->r12 = 0 */ | 93 | pushq %r8 /* pt_regs->r12 = 0 */ |
94 | pushq %r8 /* pt_regs->r13 = 0 */ | 94 | pushq %r8 /* pt_regs->r13 = 0 */ |
95 | pushq %r8 /* pt_regs->r14 = 0 */ | 95 | pushq %r8 /* pt_regs->r14 = 0 */ |
@@ -121,8 +121,9 @@ sysenter_flags_fixed: | |||
121 | 121 | ||
122 | movq %rsp, %rdi | 122 | movq %rsp, %rdi |
123 | call do_fast_syscall_32 | 123 | call do_fast_syscall_32 |
124 | testl %eax, %eax | 124 | /* XEN PV guests always use IRET path */ |
125 | jz .Lsyscall_32_done | 125 | ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ |
126 | "jmp .Lsyscall_32_done", X86_FEATURE_XENPV | ||
126 | jmp sysret32_from_system_call | 127 | jmp sysret32_from_system_call |
127 | 128 | ||
128 | sysenter_fix_flags: | 129 | sysenter_fix_flags: |
@@ -178,7 +179,7 @@ ENTRY(entry_SYSCALL_compat) | |||
178 | pushq %rdi /* pt_regs->di */ | 179 | pushq %rdi /* pt_regs->di */ |
179 | pushq %rsi /* pt_regs->si */ | 180 | pushq %rsi /* pt_regs->si */ |
180 | pushq %rdx /* pt_regs->dx */ | 181 | pushq %rdx /* pt_regs->dx */ |
181 | pushq %rcx /* pt_regs->cx (will be overwritten) */ | 182 | pushq %rbp /* pt_regs->cx (stashed in bp) */ |
182 | pushq $-ENOSYS /* pt_regs->ax */ | 183 | pushq $-ENOSYS /* pt_regs->ax */ |
183 | xorq %r8,%r8 | 184 | xorq %r8,%r8 |
184 | pushq %r8 /* pt_regs->r8 = 0 */ | 185 | pushq %r8 /* pt_regs->r8 = 0 */ |
@@ -186,7 +187,7 @@ ENTRY(entry_SYSCALL_compat) | |||
186 | pushq %r8 /* pt_regs->r10 = 0 */ | 187 | pushq %r8 /* pt_regs->r10 = 0 */ |
187 | pushq %r8 /* pt_regs->r11 = 0 */ | 188 | pushq %r8 /* pt_regs->r11 = 0 */ |
188 | pushq %rbx /* pt_regs->rbx */ | 189 | pushq %rbx /* pt_regs->rbx */ |
189 | pushq %rbp /* pt_regs->rbp */ | 190 | pushq %rbp /* pt_regs->rbp (will be overwritten) */ |
190 | pushq %r8 /* pt_regs->r12 = 0 */ | 191 | pushq %r8 /* pt_regs->r12 = 0 */ |
191 | pushq %r8 /* pt_regs->r13 = 0 */ | 192 | pushq %r8 /* pt_regs->r13 = 0 */ |
192 | pushq %r8 /* pt_regs->r14 = 0 */ | 193 | pushq %r8 /* pt_regs->r14 = 0 */ |
@@ -200,8 +201,9 @@ ENTRY(entry_SYSCALL_compat) | |||
200 | 201 | ||
201 | movq %rsp, %rdi | 202 | movq %rsp, %rdi |
202 | call do_fast_syscall_32 | 203 | call do_fast_syscall_32 |
203 | testl %eax, %eax | 204 | /* XEN PV guests always use IRET path */ |
204 | jz .Lsyscall_32_done | 205 | ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ |
206 | "jmp .Lsyscall_32_done", X86_FEATURE_XENPV | ||
205 | 207 | ||
206 | /* Opportunistic SYSRET */ | 208 | /* Opportunistic SYSRET */ |
207 | sysret32_from_system_call: | 209 | sysret32_from_system_call: |
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index 93bd8452383f..3a1d9297074b 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Code for the vDSO. This version uses the old int $0x80 method. | 2 | * AT_SYSINFO entry point |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <asm/dwarf2.h> | 5 | #include <asm/dwarf2.h> |
@@ -21,35 +21,67 @@ __kernel_vsyscall: | |||
21 | /* | 21 | /* |
22 | * Reshuffle regs so that all of any of the entry instructions | 22 | * Reshuffle regs so that all of any of the entry instructions |
23 | * will preserve enough state. | 23 | * will preserve enough state. |
24 | * | ||
25 | * A really nice entry sequence would be: | ||
26 | * pushl %edx | ||
27 | * pushl %ecx | ||
28 | * movl %esp, %ecx | ||
29 | * | ||
30 | * Unfortunately, naughty Android versions between July and December | ||
31 | * 2015 actually hardcode the traditional Linux SYSENTER entry | ||
32 | * sequence. That is severely broken for a number of reasons (ask | ||
33 | * anyone with an AMD CPU, for example). Nonetheless, we try to keep | ||
34 | * it working approximately as well as it ever worked. | ||
35 | * | ||
36 | * This link may elucidate some of the history: | ||
37 | * https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7 | ||
38 | * personally, I find it hard to understand what's going on there. | ||
39 | * | ||
40 | * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE. | ||
41 | * Execute an indirect call to the address in the AT_SYSINFO auxv | ||
42 | * entry. That is the ONLY correct way to make a fast 32-bit system | ||
43 | * call on Linux. (Open-coding int $0x80 is also fine, but it's | ||
44 | * slow.) | ||
24 | */ | 45 | */ |
46 | pushl %ecx | ||
47 | CFI_ADJUST_CFA_OFFSET 4 | ||
48 | CFI_REL_OFFSET ecx, 0 | ||
25 | pushl %edx | 49 | pushl %edx |
26 | CFI_ADJUST_CFA_OFFSET 4 | 50 | CFI_ADJUST_CFA_OFFSET 4 |
27 | CFI_REL_OFFSET edx, 0 | 51 | CFI_REL_OFFSET edx, 0 |
28 | pushl %ecx | 52 | pushl %ebp |
29 | CFI_ADJUST_CFA_OFFSET 4 | 53 | CFI_ADJUST_CFA_OFFSET 4 |
30 | CFI_REL_OFFSET ecx, 0 | 54 | CFI_REL_OFFSET ebp, 0 |
31 | movl %esp, %ecx | 55 | |
56 | #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" | ||
57 | #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" | ||
32 | 58 | ||
33 | #ifdef CONFIG_X86_64 | 59 | #ifdef CONFIG_X86_64 |
34 | /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ | 60 | /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ |
35 | ALTERNATIVE_2 "", "sysenter", X86_FEATURE_SYSENTER32, \ | 61 | ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \ |
36 | "syscall", X86_FEATURE_SYSCALL32 | 62 | SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 |
37 | #else | 63 | #else |
38 | ALTERNATIVE "", "sysenter", X86_FEATURE_SEP | 64 | ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP |
39 | #endif | 65 | #endif |
40 | 66 | ||
41 | /* Enter using int $0x80 */ | 67 | /* Enter using int $0x80 */ |
42 | movl (%esp), %ecx | ||
43 | int $0x80 | 68 | int $0x80 |
44 | GLOBAL(int80_landing_pad) | 69 | GLOBAL(int80_landing_pad) |
45 | 70 | ||
46 | /* Restore ECX and EDX in case they were clobbered. */ | 71 | /* |
47 | popl %ecx | 72 | * Restore EDX and ECX in case they were clobbered. EBP is not |
48 | CFI_RESTORE ecx | 73 | * clobbered (the kernel restores it), but it's cleaner and |
74 | * probably faster to pop it than to adjust ESP using addl. | ||
75 | */ | ||
76 | popl %ebp | ||
77 | CFI_RESTORE ebp | ||
49 | CFI_ADJUST_CFA_OFFSET -4 | 78 | CFI_ADJUST_CFA_OFFSET -4 |
50 | popl %edx | 79 | popl %edx |
51 | CFI_RESTORE edx | 80 | CFI_RESTORE edx |
52 | CFI_ADJUST_CFA_OFFSET -4 | 81 | CFI_ADJUST_CFA_OFFSET -4 |
82 | popl %ecx | ||
83 | CFI_RESTORE ecx | ||
84 | CFI_ADJUST_CFA_OFFSET -4 | ||
53 | ret | 85 | ret |
54 | CFI_ENDPROC | 86 | CFI_ENDPROC |
55 | 87 | ||
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index e4f8010f22e0..f7ba9fbf12ee 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -216,6 +216,7 @@ | |||
216 | #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ | 216 | #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ |
217 | #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ | 217 | #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ |
218 | #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ | 218 | #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ |
219 | #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ | ||
219 | 220 | ||
220 | 221 | ||
221 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ | 222 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 10d0596433f8..c759b3cca663 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -19,6 +19,12 @@ static inline int paravirt_enabled(void) | |||
19 | return pv_info.paravirt_enabled; | 19 | return pv_info.paravirt_enabled; |
20 | } | 20 | } |
21 | 21 | ||
22 | static inline int paravirt_has_feature(unsigned int feature) | ||
23 | { | ||
24 | WARN_ON_ONCE(!pv_info.paravirt_enabled); | ||
25 | return (pv_info.features & feature); | ||
26 | } | ||
27 | |||
22 | static inline void load_sp0(struct tss_struct *tss, | 28 | static inline void load_sp0(struct tss_struct *tss, |
23 | struct thread_struct *thread) | 29 | struct thread_struct *thread) |
24 | { | 30 | { |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 31247b5bff7c..3d44191185f8 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -70,9 +70,14 @@ struct pv_info { | |||
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | int paravirt_enabled; | 72 | int paravirt_enabled; |
73 | unsigned int features; /* valid only if paravirt_enabled is set */ | ||
73 | const char *name; | 74 | const char *name; |
74 | }; | 75 | }; |
75 | 76 | ||
77 | #define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x) | ||
78 | /* Supported features */ | ||
79 | #define PV_SUPPORTED_RTC (1<<0) | ||
80 | |||
76 | struct pv_init_ops { | 81 | struct pv_init_ops { |
77 | /* | 82 | /* |
78 | * Patch may replace one of the defined code sequences with | 83 | * Patch may replace one of the defined code sequences with |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 67522256c7ff..2d5a50cb61a2 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -472,6 +472,7 @@ static inline unsigned long current_top_of_stack(void) | |||
472 | #else | 472 | #else |
473 | #define __cpuid native_cpuid | 473 | #define __cpuid native_cpuid |
474 | #define paravirt_enabled() 0 | 474 | #define paravirt_enabled() 0 |
475 | #define paravirt_has(x) 0 | ||
475 | 476 | ||
476 | static inline void load_sp0(struct tss_struct *tss, | 477 | static inline void load_sp0(struct tss_struct *tss, |
477 | struct thread_struct *thread) | 478 | struct thread_struct *thread) |
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 38dd5efdd04c..2bd2292a316d 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c | |||
@@ -193,20 +193,17 @@ static int __init numachip_system_init(void) | |||
193 | case 1: | 193 | case 1: |
194 | init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE); | 194 | init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE); |
195 | numachip_apic_icr_write = numachip1_apic_icr_write; | 195 | numachip_apic_icr_write = numachip1_apic_icr_write; |
196 | x86_init.pci.arch_init = pci_numachip_init; | ||
197 | break; | 196 | break; |
198 | case 2: | 197 | case 2: |
199 | init_extra_mapping_uc(NUMACHIP2_LCSR_BASE, NUMACHIP2_LCSR_SIZE); | 198 | init_extra_mapping_uc(NUMACHIP2_LCSR_BASE, NUMACHIP2_LCSR_SIZE); |
200 | numachip_apic_icr_write = numachip2_apic_icr_write; | 199 | numachip_apic_icr_write = numachip2_apic_icr_write; |
201 | |||
202 | /* Use MCFG config cycles rather than locked CF8 cycles */ | ||
203 | raw_pci_ops = &pci_mmcfg; | ||
204 | break; | 200 | break; |
205 | default: | 201 | default: |
206 | return 0; | 202 | return 0; |
207 | } | 203 | } |
208 | 204 | ||
209 | x86_cpuinit.fixup_cpu_id = fixup_cpu_id; | 205 | x86_cpuinit.fixup_cpu_id = fixup_cpu_id; |
206 | x86_init.pci.arch_init = pci_numachip_init; | ||
210 | 207 | ||
211 | return 0; | 208 | return 0; |
212 | } | 209 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index c5b0d562dbf5..7e8a736d09db 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -999,6 +999,17 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
999 | int flags = MF_ACTION_REQUIRED; | 999 | int flags = MF_ACTION_REQUIRED; |
1000 | int lmce = 0; | 1000 | int lmce = 0; |
1001 | 1001 | ||
1002 | /* If this CPU is offline, just bail out. */ | ||
1003 | if (cpu_is_offline(smp_processor_id())) { | ||
1004 | u64 mcgstatus; | ||
1005 | |||
1006 | mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); | ||
1007 | if (mcgstatus & MCG_STATUS_RIPV) { | ||
1008 | mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); | ||
1009 | return; | ||
1010 | } | ||
1011 | } | ||
1012 | |||
1002 | ist_enter(regs); | 1013 | ist_enter(regs); |
1003 | 1014 | ||
1004 | this_cpu_inc(mce_exception_count); | 1015 | this_cpu_inc(mce_exception_count); |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index cd9685235df9..4af8d063fb36 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -200,6 +200,9 @@ static __init int add_rtc_cmos(void) | |||
200 | } | 200 | } |
201 | #endif | 201 | #endif |
202 | 202 | ||
203 | if (paravirt_enabled() && !paravirt_has(RTC)) | ||
204 | return -ENODEV; | ||
205 | |||
203 | platform_device_register(&rtc_device); | 206 | platform_device_register(&rtc_device); |
204 | dev_info(&rtc_device.dev, | 207 | dev_info(&rtc_device.dev, |
205 | "registered platform RTC device (no PNP device found)\n"); | 208 | "registered platform RTC device (no PNP device found)\n"); |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a0d09f6c6533..a43b2eafc466 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -1414,6 +1414,7 @@ __init void lguest_init(void) | |||
1414 | pv_info.kernel_rpl = 1; | 1414 | pv_info.kernel_rpl = 1; |
1415 | /* Everyone except Xen runs with this set. */ | 1415 | /* Everyone except Xen runs with this set. */ |
1416 | pv_info.shared_kernel_pmd = 1; | 1416 | pv_info.shared_kernel_pmd = 1; |
1417 | pv_info.features = 0; | ||
1417 | 1418 | ||
1418 | /* | 1419 | /* |
1419 | * We set up all the lguest overrides for sensitive operations. These | 1420 | * We set up all the lguest overrides for sensitive operations. These |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 5774800ff583..b7de78bdc09c 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1192,7 +1192,7 @@ static const struct pv_info xen_info __initconst = { | |||
1192 | #ifdef CONFIG_X86_64 | 1192 | #ifdef CONFIG_X86_64 |
1193 | .extra_user_64bit_cs = FLAT_USER_CS64, | 1193 | .extra_user_64bit_cs = FLAT_USER_CS64, |
1194 | #endif | 1194 | #endif |
1195 | 1195 | .features = 0, | |
1196 | .name = "Xen", | 1196 | .name = "Xen", |
1197 | }; | 1197 | }; |
1198 | 1198 | ||
@@ -1535,6 +1535,8 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1535 | 1535 | ||
1536 | /* Install Xen paravirt ops */ | 1536 | /* Install Xen paravirt ops */ |
1537 | pv_info = xen_info; | 1537 | pv_info = xen_info; |
1538 | if (xen_initial_domain()) | ||
1539 | pv_info.features |= PV_SUPPORTED_RTC; | ||
1538 | pv_init_ops = xen_init_ops; | 1540 | pv_init_ops = xen_init_ops; |
1539 | pv_apic_ops = xen_apic_ops; | 1541 | pv_apic_ops = xen_apic_ops; |
1540 | if (!xen_pvh_domain()) { | 1542 | if (!xen_pvh_domain()) { |
@@ -1886,8 +1888,10 @@ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); | |||
1886 | 1888 | ||
1887 | static void xen_set_cpu_features(struct cpuinfo_x86 *c) | 1889 | static void xen_set_cpu_features(struct cpuinfo_x86 *c) |
1888 | { | 1890 | { |
1889 | if (xen_pv_domain()) | 1891 | if (xen_pv_domain()) { |
1890 | clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); | 1892 | clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); |
1893 | set_cpu_cap(c, X86_FEATURE_XENPV); | ||
1894 | } | ||
1891 | } | 1895 | } |
1892 | 1896 | ||
1893 | const struct hypervisor_x86 x86_hyper_xen = { | 1897 | const struct hypervisor_x86 x86_hyper_xen = { |