Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c        1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c    3
-rw-r--r--  arch/x86/kernel/microcode_core.c              3
-rw-r--r--  arch/x86/kvm/i8259.c                          2
-rw-r--r--  arch/x86/kvm/vmx.c                           23
-rw-r--r--  arch/x86/kvm/x86.c                           16
-rw-r--r--  arch/x86/xen/mmu.c                            2
-rw-r--r--  arch/x86/xen/p2m.c                            2
8 files changed, 40 insertions, 12 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 7f2739e03e79..0d3d63afa76a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
                 break;
 
         case 28: /* Atom */
+        case 54: /* Cedariew */
                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b4265fcd2..da02e9cc3754 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
          * to have an operational LBR which can freeze
          * on PMU interrupt
          */
-        if (boot_cpu_data.x86_mask < 10) {
+        if (boot_cpu_data.x86_model == 28
+            && boot_cpu_data.x86_mask < 10) {
                 pr_cont("LBR disabled due to erratum");
                 return;
         }
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 4873e62db6a1..9e5bcf1e2376 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
         if (do_microcode_update(buf, len) == 0)
                 ret = (ssize_t)len;
 
+        if (ret > 0)
+                perf_check_microcode();
+
         mutex_unlock(&microcode_mutex);
         put_online_cpus();
 
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index e498b18f010c..9fc9aa7ac703 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
         if (val & 0x10) {
                 u8 edge_irr = s->irr & ~s->elcr;
                 int i;
-                bool found;
+                bool found = false;
                 struct kvm_vcpu *vcpu;
 
                 s->init4 = val & 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03de1b79..b1eb202ee76a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
 
 static int alloc_apic_access_page(struct kvm *kvm)
 {
+        struct page *page;
         struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
         if (r)
                 goto out;
 
-        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+        page = gfn_to_page(kvm, 0xfee00);
+        if (is_error_page(page)) {
+                r = -EFAULT;
+                goto out;
+        }
+
+        kvm->arch.apic_access_page = page;
 out:
         mutex_unlock(&kvm->slots_lock);
         return r;
@@ -3641,6 +3648,7 @@ out:
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
+        struct page *page;
         struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
         if (r)
                 goto out;
 
-        kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
-                        kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+        page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+        if (is_error_page(page)) {
+                r = -EFAULT;
+                goto out;
+        }
+
+        kvm->arch.ept_identity_pagetable = page;
 out:
         mutex_unlock(&kvm->slots_lock);
         return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
         /* Exposing INVPCID only when PCID is exposed */
         best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
         if (vmx_invpcid_supported() &&
-            best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+            best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
             guest_cpuid_has_pcid(vcpu)) {
                 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
                              exec_control);
                 if (best)
-                        best->ecx &= ~bit(X86_FEATURE_INVPCID);
+                        best->ebx &= ~bit(X86_FEATURE_INVPCID);
         }
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dce75b760312..2966c847d489 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         case MSR_KVM_STEAL_TIME:
                 data = vcpu->arch.st.msr_val;
                 break;
+        case MSR_KVM_PV_EOI_EN:
+                data = vcpu->arch.pv_eoi.msr_val;
+                break;
         case MSR_IA32_P5_MC_ADDR:
         case MSR_IA32_P5_MC_TYPE:
         case MSR_IA32_MCG_CAP:
@@ -5110,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
                 !kvm_event_needs_reinjection(vcpu);
 }
 
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
 {
         struct kvm_lapic *apic = vcpu->arch.apic;
         struct page *page;
 
         if (!apic || !apic->vapic_addr)
-                return;
+                return 0;
 
         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+        if (is_error_page(page))
+                return -EFAULT;
 
         vcpu->arch.apic->vapic_page = page;
+        return 0;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5427,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
         }
 
         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-        vapic_enter(vcpu);
+        r = vapic_enter(vcpu);
+        if (r) {
+                srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+                return r;
+        }
 
         r = 1;
         while (r > 0) {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b65a76133f4f..5141d808e751 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
         cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
         args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-        if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+        if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
                 args->op.cmd = MMUEXT_INVLPG_MULTI;
                 args->op.arg1.linear_addr = start;
         }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index d4b255463253..76ba0e97e530 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
         if (p2m_index(set_pfn))
                 return false;
 
-        for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+        for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
                 topidx = p2m_top_index(pfn);
 
                 if (!p2m_top[topidx])