summaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorMatt Delco <delco@chromium.org>2019-09-16 17:16:54 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2019-09-18 09:56:55 -0400
commitb60fe990c6b07ef6d4df67bc0530c7c90a62623a (patch)
tree49d837232351e9b86c9632ccc509f26596e269ed /virt
parenta9c20bb0206ae9384bd470a6832dd8913730add9 (diff)
KVM: coalesced_mmio: add bounds checking
The first/last indexes are typically shared with a user app. The app can change the 'last' index that the kernel uses to store the next result. This change sanity checks the index before using it for writing to a potentially arbitrary address.

This fixes CVE-2019-14821.

Cc: stable@vger.kernel.org
Fixes: 5f94c1741bdc ("KVM: Add coalesced MMIO support (common part)")
Signed-off-by: Matt Delco <delco@chromium.org>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
[Use READ_ONCE. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/coalesced_mmio.c19
1 file changed, 11 insertions, 8 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 5294abb3f178..8ffd07e2a160 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
40 return 1; 40 return 1;
41} 41}
42 42
43static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) 43static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
44{ 44{
45 struct kvm_coalesced_mmio_ring *ring; 45 struct kvm_coalesced_mmio_ring *ring;
46 unsigned avail; 46 unsigned avail;
@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
52 * there is always one unused entry in the buffer 52 * there is always one unused entry in the buffer
53 */ 53 */
54 ring = dev->kvm->coalesced_mmio_ring; 54 ring = dev->kvm->coalesced_mmio_ring;
55 avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; 55 avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
56 if (avail == 0) { 56 if (avail == 0) {
57 /* full */ 57 /* full */
58 return 0; 58 return 0;
@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
67{ 67{
68 struct kvm_coalesced_mmio_dev *dev = to_mmio(this); 68 struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
69 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; 69 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
70 __u32 insert;
70 71
71 if (!coalesced_mmio_in_range(dev, addr, len)) 72 if (!coalesced_mmio_in_range(dev, addr, len))
72 return -EOPNOTSUPP; 73 return -EOPNOTSUPP;
73 74
74 spin_lock(&dev->kvm->ring_lock); 75 spin_lock(&dev->kvm->ring_lock);
75 76
76 if (!coalesced_mmio_has_room(dev)) { 77 insert = READ_ONCE(ring->last);
78 if (!coalesced_mmio_has_room(dev, insert) ||
79 insert >= KVM_COALESCED_MMIO_MAX) {
77 spin_unlock(&dev->kvm->ring_lock); 80 spin_unlock(&dev->kvm->ring_lock);
78 return -EOPNOTSUPP; 81 return -EOPNOTSUPP;
79 } 82 }
80 83
81 /* copy data in first free entry of the ring */ 84 /* copy data in first free entry of the ring */
82 85
83 ring->coalesced_mmio[ring->last].phys_addr = addr; 86 ring->coalesced_mmio[insert].phys_addr = addr;
84 ring->coalesced_mmio[ring->last].len = len; 87 ring->coalesced_mmio[insert].len = len;
85 memcpy(ring->coalesced_mmio[ring->last].data, val, len); 88 memcpy(ring->coalesced_mmio[insert].data, val, len);
86 ring->coalesced_mmio[ring->last].pio = dev->zone.pio; 89 ring->coalesced_mmio[insert].pio = dev->zone.pio;
87 smp_wmb(); 90 smp_wmb();
88 ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; 91 ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
89 spin_unlock(&dev->kvm->ring_lock); 92 spin_unlock(&dev->kvm->ring_lock);
90 return 0; 93 return 0;
91} 94}