author		Andy Honig <ahonig@google.com>	2013-11-20 13:23:22 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2013-12-12 16:39:46 -0500
commit		fda4e2e85589191b123d31cdc21fd33ee70f50fd
tree		68cfbeb6ea9d06ffd164bcc8b692993a90367333
parent		b963a22e6d1a266a67e9eecc88134713fd54775c
KVM: x86: Convert vapic synchronization to _cached functions (CVE-2013-6368)
In kvm_lapic_sync_from_vapic and kvm_lapic_sync_to_vapic there is the
potential to corrupt kernel memory if userspace provides an address that
is at the end of a page. This patch converts those functions to use
kvm_write_guest_cached and kvm_read_guest_cached. It also checks the
vapic address specified by userspace during ioctl processing and returns
an error to userspace if the address is not a valid GPA.

This is generally not guest triggerable, because the required write is
done by firmware that runs before the guest. Also, it only affects AMD
processors and older Intel processors that lack the FlexPriority feature
(unless FlexPriority is disabled, in which case newer processors are also
affected).

Fixes: b93463aa59d6 ('KVM: Accelerated apic support')
Reported-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/lapic.c	27
-rw-r--r--	arch/x86/kvm/lapic.h	 4
-rw-r--r--	arch/x86/kvm/x86.c	40
3 files changed, 18 insertions(+), 53 deletions(-)
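
The end-of-page case the patch closes can be seen with plain offset arithmetic: once offset_in_page(vapic_addr) is larger than PAGE_SIZE - sizeof(u32), the old kmap_atomic()-based access touches bytes beyond the single page that was mapped. The following is a stand-alone user-space sketch of that arithmetic only, not kernel code; PAGE_SIZE and offset_in_page() are re-defined locally to mirror the kernel's definitions, and the example address is made up.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
/* local stand-in for the kernel's offset_in_page() */
#define offset_in_page(addr) ((unsigned long)(addr) & (PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical vapic address two bytes before a page boundary */
	uint64_t vapic_addr = 0x1000ffeULL;

	unsigned long off = offset_in_page(vapic_addr);
	unsigned long last = off + sizeof(uint32_t) - 1;

	printf("offset in page:    0x%lx\n", off);
	printf("last byte touched: 0x%lx (%s the mapped page)\n",
	       last, last >= PAGE_SIZE ? "beyond" : "within");

	/*
	 * Before this patch, kvm_lapic_sync_to_vapic() did (abridged)
	 *   *(u32 *)(vapic + offset_in_page(vapic_addr)) = data;
	 * on a single kmap_atomic()-mapped page, so a 4-byte store at
	 * offset 0xffe spills two bytes into whatever follows the mapping.
	 */
	return 0;
}
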
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 89b52ec7d09c..b8bec45c1610 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1692,7 +1692,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 {
 	u32 data;
-	void *vapic;
 
 	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
 		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1700,9 +1699,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
 		return;
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
-	data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-	kunmap_atomic(vapic);
+	kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+				sizeof(u32));
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1738,7 +1736,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 	u32 data, tpr;
 	int max_irr, max_isr;
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	void *vapic;
 
 	apic_sync_pv_eoi_to_guest(vcpu, apic);
 
@@ -1754,18 +1751,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 		max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
-	*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-	kunmap_atomic(vapic);
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+				sizeof(u32));
 }
 
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
-	vcpu->arch.apic->vapic_addr = vapic_addr;
-	if (vapic_addr)
+	if (vapic_addr) {
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+					&vcpu->arch.apic->vapic_cache,
+					vapic_addr, sizeof(u32)))
+			return -EINVAL;
 		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
-	else
+	} else {
 		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+	}
+
+	vcpu->arch.apic->vapic_addr = vapic_addr;
+	return 0;
 }
 
 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9fe801..c8b0d0d2da5c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
 	 */
 	void *regs;
 	gpa_t vapic_addr;
-	struct page *vapic_page;
+	struct gfn_to_hva_cache vapic_cache;
 	unsigned long pending_events;
 	unsigned int sipi_vector;
 };
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
 
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21ef1ba184ae..5d004da1e35d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&va, argp, sizeof va))
 			goto out;
-		r = 0;
-		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
 		break;
 	}
 	case KVM_X86_SETUP_MCE: {
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 		!kvm_event_needs_reinjection(vcpu);
 }
 
-static int vapic_enter(struct kvm_vcpu *vcpu)
-{
-	struct kvm_lapic *apic = vcpu->arch.apic;
-	struct page *page;
-
-	if (!apic || !apic->vapic_addr)
-		return 0;
-
-	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-	if (is_error_page(page))
-		return -EFAULT;
-
-	vcpu->arch.apic->vapic_page = page;
-	return 0;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
-	struct kvm_lapic *apic = vcpu->arch.apic;
-	int idx;
-
-	if (!apic || !apic->vapic_addr)
-		return;
-
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	kvm_release_page_dirty(apic->vapic_page);
-	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 {
 	int max_irr, tpr;
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-	r = vapic_enter(vcpu);
-	if (r) {
-		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-		return r;
-	}
 
 	r = 1;
 	while (r > 0) {
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 
 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
-	vapic_exit(vcpu);
-
 	return r;
 }
 
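
At the ioctl level the change is visible to the VMM: KVM_SET_VAPIC_ADDR now reports a bad address immediately rather than returning 0 unconditionally. Below is a minimal user-space sketch of the call site; the /dev/kvm bring-up is the usual sequence with error checks trimmed, and the guest-physical address is made up (no guest memory is registered here, so on a patched kernel the ioctl is expected to fail with EINVAL).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	/* usual /dev/kvm bring-up; error checks trimmed for brevity */
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	ioctl(vm, KVM_CREATE_IRQCHIP, 0);	/* KVM_SET_VAPIC_ADDR requires the in-kernel irqchip */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	struct kvm_vapic_addr va = {
		.vapic_addr = 0xf000,		/* hypothetical guest-physical address */
	};

	/*
	 * No memslot backs this GPA, so kvm_lapic_set_vapic_addr() fails its
	 * kvm_gfn_to_hva_cache_init() check and the ioctl returns -EINVAL.
	 * Before the patch the handler set r = 0 and accepted any address.
	 */
	if (ioctl(vcpu, KVM_SET_VAPIC_ADDR, &va) < 0)
		perror("KVM_SET_VAPIC_ADDR");

	return 0;
}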