Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig              |  1
-rw-r--r--  arch/x86/include/asm/xsave.h  | 28
-rw-r--r--  arch/x86/kernel/entry_64.S    | 13
-rw-r--r--  arch/x86/kvm/emulate.c        |  3
-rw-r--r--  arch/x86/kvm/lapic.c          |  4
-rw-r--r--  arch/x86/kvm/svm.c            |  6
-rw-r--r--  arch/x86/kvm/vmx.c            | 23
-rw-r--r--  arch/x86/pci/acpi.c           | 11
8 files changed, 46 insertions(+), 43 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c2fb8a87dccb..b7d31ca55187 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -499,6 +499,7 @@ config X86_INTEL_QUARK
 	depends on X86_IO_APIC
 	select IOSF_MBI
 	select INTEL_IMR
+	select COMMON_CLK
 	---help---
 	  Select to include support for Quark X1000 SoC.
 	  Say Y here if you have a Quark based system such as the Arduino
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 5fa9770035dc..c9a6d68b8d62 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
 	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		asm volatile("1:"XSAVES"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
 	else
 		asm volatile("1:"XSAVE"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
-
-	asm volatile(xstate_fault
-		     : "0" (0)
-		     : "memory");
-
 	return err;
 }
 
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
 	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		asm volatile("1:"XRSTORS"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
 	else
 		asm volatile("1:"XRSTOR"\n\t"
 			"2:\n\t"
-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+			xstate_fault
+			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 			:   "memory");
-
-	asm volatile(xstate_fault
-		     : "0" (0)
-		     : "memory");
-
 	return err;
 }
 
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
 	 */
 	alternative_input_2(
 		"1:"XSAVE,
-		"1:"XSAVEOPT,
+		XSAVEOPT,
 		X86_FEATURE_XSAVEOPT,
-		"1:"XSAVES,
+		XSAVES,
 		X86_FEATURE_XSAVES,
 		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
 		"memory");
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
 	 */
 	alternative_input(
 		"1: " XRSTOR,
-		"1: " XRSTORS,
+		XRSTORS,
 		X86_FEATURE_XSAVES,
 		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
 		: "memory");
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 10074ad9ebf8..1d74d161687c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork)
 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
 	jz   1f
 
-	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
-	jnz  int_ret_from_sys_call
-
-	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
-	jmp ret_from_sys_call			# go to the SYSRET fastpath
+	/*
+	 * By the time we get here, we have no idea whether our pt_regs,
+	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+	 * the slow path, or one of the ia32entry paths.
+	 * Use int_ret_from_sys_call to return, since it can safely handle
+	 * all of the above.
+	 */
+	jmp  int_ret_from_sys_call
 
 1:
 	subq $REST_SKIP, %rsp			# leave space for volatiles
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e0b794a84c35..106c01557f2b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			goto done;
 		}
 	}
-	ctxt->dst.orig_val = ctxt->dst.val;
+	/* Copy full 64-bit value for CMPXCHG8B. */
+	ctxt->dst.orig_val64 = ctxt->dst.val64;
 
 special_insn:
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e55b5fc344eb..bd4e34de24c7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 			apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
 	apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
-	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
+	apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
 	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
-	apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+	apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
 		1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
 	if (kvm_x86_ops->hwapic_irr_update)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d319e0c24758..cc618c882f90 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 	return;
 }
 
-static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
-{
-	return;
-}
-
 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
 	return;
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
 	.vm_has_apicv = svm_vm_has_apicv,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
-	.hwapic_isr_update = svm_hwapic_isr_update,
 	.sync_pir_to_irr = svm_sync_pir_to_irr,
 
 	.set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14c1a18d206a..f7b20b417a3a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_SMP
+	if (vcpu->mode == IN_GUEST_MODE) {
+		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+				POSTED_INTR_VECTOR);
+		return true;
+	}
+#endif
+	return false;
+}
+
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 						int vector)
 {
@@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 	if (is_guest_mode(vcpu) &&
 	    vector == vmx->nested.posted_intr_nv) {
 		/* the PIR and ON have been set by L1. */
-		if (vcpu->mode == IN_GUEST_MODE)
-			apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-				POSTED_INTR_VECTOR);
+		kvm_vcpu_trigger_posted_interrupt(vcpu);
 		/*
 		 * If a posted intr is not recognized by hardware,
 		 * we will accomplish it in the next vmentry.
@@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 
 	r = pi_test_and_set_on(&vmx->pi_desc);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
-#ifdef CONFIG_SMP
-	if (!r && (vcpu->mode == IN_GUEST_MODE))
-		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-				POSTED_INTR_VECTOR);
-	else
-#endif
+	if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
 		kvm_vcpu_kick(vcpu);
 }
 
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 6ac273832f28..e4695985f9de 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info,
 				struct list_head *list)
 {
 	int ret;
-	struct resource_entry *entry;
+	struct resource_entry *entry, *tmp;
 
 	sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
 	info->bridge = device;
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info,
 		dev_dbg(&device->dev,
 			"no IO and memory resources present in _CRS\n");
 	else
-		resource_list_for_each_entry(entry, list)
-			entry->res->name = info->name;
+		resource_list_for_each_entry_safe(entry, tmp, list) {
+			if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
+			    (entry->res->flags & IORESOURCE_DISABLED))
+				resource_list_destroy_entry(entry);
+			else
+				entry->res->name = info->name;
+		}
 }
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)