Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig                   |  7 ++++---
 arch/x86/include/asm/cpufeature.h  |  6 +++---
 arch/x86/include/asm/jump_label.h  |  2 +-
 arch/x86/include/asm/mutex_64.h    |  4 ++--
 arch/x86/kernel/apic/x2apic_uv_x.c |  2 +-
 arch/x86/kernel/cpu/perf_event.c   | 11 +++--------
 arch/x86/kernel/kvm.c              | 17 +++++++++++++---
 arch/x86/kernel/reboot.c           |  8 ++++++++
 arch/x86/kvm/vmx.c                 | 24 ++++++++++++------------
 arch/x86/xen/smp.c                 |  9 +++++++++
 10 files changed, 57 insertions(+), 33 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ee2fb9d37745..f67e839f06c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
-	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+	depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
 	---help---
 	  A local APIC (Advanced Programmable Interrupt Controller) is an
 	  integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
 
 config X86_IO_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
 
 config X86_VISWS_APIC
 	def_bool y
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
 	tristate "CPU microcode loading support"
+	depends on CPU_SUP_AMD || CPU_SUP_INTEL
 	select FW_LOADER
 	---help---
 
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d8..89270b4318db 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 	 * Catch too early usage of this before alternatives
 	 * have run.
 	 */
-	asm goto("1: jmp %l[t_warn]\n"
+	asm_volatile_goto("1: jmp %l[t_warn]\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
 		 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #endif
 
-	asm goto("1: jmp %l[t_no]\n"
+	asm_volatile_goto("1: jmp %l[t_no]\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
 		 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 	 * have. Thus, we force the jump to the widest, 4-byte, signed relative
 	 * offset even though the last would often fit in less bytes.
 	 */
-	asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+	asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
 		 "2:\n"
 		 ".section .altinstructions,\"a\"\n"
 		 " .long 1b - .\n"	/* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800c..6a2cefb4395a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:"
+	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648ed..07537a44216e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
 static inline void __mutex_fastpath_lock(atomic_t *v,
 					 void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX " decl %0\n"
+	asm_volatile_goto(LOCK_PREFIX " decl %0\n"
 			  " jns %l[exit]\n"
 			  : : "m" (v->counter)
 			  : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void __mutex_fastpath_unlock(atomic_t *v,
 					   void (*fail_fn)(atomic_t *))
 {
-	asm volatile goto(LOCK_PREFIX " incl %0\n"
+	asm_volatile_goto(LOCK_PREFIX " incl %0\n"
 			  " jg %l[exit]\n"
 			  : : "m" (v->counter)
 			  : "memory", "cc"
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1191ac1c9d25..a419814cea57 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
 		break;
 	case UV3_HUB_PART_NUMBER:
 	case UV3_HUB_PART_NUMBER_X:
-		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
 		break;
 	}
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 897783b3302a..9d8449158cf9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-		return;
-
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+	if (!sched_clock_stable)
 		return;
 
 	userpg->cap_user_time = 1;
@@ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 	userpg->time_shift = CYC2NS_SCALE_FACTOR;
 	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
-	if (sched_clock_stable && !check_tsc_disabled()) {
-		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = this_cpu_read(cyc2ns_offset);
-	}
+	userpg->cap_user_time_zero = 1;
+	userpg->time_zero = this_cpu_read(cyc2ns_offset);
 }
 
 /*
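
With cap_user_time_zero and time_zero now exported whenever sched_clock is stable, userspace can convert raw TSC readings to perf timestamps without a syscall. A minimal sketch of the conversion, following the algorithm documented for these fields in include/uapi/linux/perf_event.h (the helper name is illustrative):

#include <stdint.h>
#include <linux/perf_event.h>

/* Convert a raw TSC value to perf (sched_clock) time using the mmap'ed
 * event page.  Callers must read the fields under the pc->lock seqcount
 * and check cap_user_time_zero before trusting time_zero. */
static uint64_t tsc_to_perf_time(uint64_t cyc,
				 const struct perf_event_mmap_page *pc)
{
	uint64_t quot = cyc >> pc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}

Splitting the cycle count into quotient and remainder keeps the 64-bit multiply from overflowing for large TSC values.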
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 697b93af02dd..a0e2a8a80c94 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+
+static __init int kvm_spinlock_init_jump(void)
+{
+	if (!kvm_para_available())
+		return 0;
+	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+		return 0;
 
 	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
 
-	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-	pv_lock_ops.unlock_kick = kvm_unlock_kick;
+	return 0;
 }
+early_initcall(kvm_spinlock_init_jump);
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
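
The kvm.c reorganization above moves the static_key_slow_inc() out of kvm_spinlock_init(), which runs too early in boot, into an early_initcall: flipping a static key is only safe once the jump-label infrastructure has been set up in start_kernel(), before initcalls run. A sketch of the general pattern, with hypothetical names:

#include <linux/init.h>
#include <linux/jump_label.h>

/* hypothetical example key, default off */
static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static bool my_feature_wanted;	/* decided during early boot */

static __init int my_feature_init_jump(void)
{
	/* safe here: jump labels are initialized before initcalls run */
	if (my_feature_wanted)
		static_key_slow_inc(&my_feature_key);
	return 0;
}
early_initcall(my_feature_init_jump);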
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e643e744e4d8..7e920bff99a3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Latitude E5410. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E5410",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+		},
+	},
 	{	/* Handle problems with rebooting on the Latitude E5420. */
 		.callback = set_pci_reboot,
 		.ident = "Dell Latitude E5420",
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3b8e7459dd4d..2b2fce1b2009 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	if (!test_bit(VCPU_EXREG_PDPTR,
 		      (unsigned long *)&vcpu->arch.regs_dirty))
 		return;
 
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
-		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
-		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
-		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
 	}
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 	}
 
 	__set_bit(VCPU_EXREG_PDPTR,
@@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
 		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
 		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
-		__clear_bit(VCPU_EXREG_PDPTR,
-				(unsigned long *)&vcpu->arch.regs_avail);
-		__clear_bit(VCPU_EXREG_PDPTR,
-				(unsigned long *)&vcpu->arch.regs_dirty);
 	}
 
 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d1e4777b4e75..31d04758b76f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
 	   old memory can be recycled */
 	make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+	/*
+	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+	 * expects __USER_DS
+	 */
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+#endif
+
 	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
 }