Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/coalesced_mmio.c	10
-rw-r--r--	virt/kvm/coalesced_mmio.h	1
2 files changed, 5 insertions, 6 deletions
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 754906800999..397f41936698 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -31,10 +31,6 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
 	if (!is_write)
 		return 0;
 
-	/* kvm->lock is taken by the caller and must be not released before
-	 * dev.read/write
-	 */
-
 	/* Are we able to batch it ? */
 
 	/* last is the first free entry
@@ -43,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
 	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < 1) {
+	if (avail < KVM_MAX_VCPUS) {
 		/* full */
 		return 0;
 	}
@@ -70,7 +66,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
 
-	/* kvm->lock must be taken by caller before call to in_range()*/
+	spin_lock(&dev->lock);
 
 	/* copy data in first free entry of the ring */
 
@@ -79,6 +75,7 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	spin_unlock(&dev->lock);
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
@@ -101,6 +98,7 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
+	spin_lock_init(&dev->lock);
 	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
 	dev->kvm = kvm;
 	kvm->coalesced_mmio_dev = dev;
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 5ac0ec628461..4b49f27fa31e 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,6 +12,7 @@
 struct kvm_coalesced_mmio_dev {
 	struct kvm_io_device dev;
 	struct kvm *kvm;
+	spinlock_t lock;
 	int nb_zones;
 	struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
 };
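
Note on the availability check changed above: what follows is a minimal stand-alone sketch (not kernel code) of the ring accounting, assuming the usual reading of ring->first as the consumer index and ring->last as the producer index; RING_MAX and NR_VCPUS are made-up stand-ins for KVM_COALESCED_MMIO_MAX and KVM_MAX_VCPUS. The apparent rationale for moving from "avail < 1" to "avail < KVM_MAX_VCPUS" is that once coalesced_mmio_write() relies on the per-device spinlock instead of kvm->lock held across in_range(), every vcpu may pass the availability check before any of them queues its entry, so the check has to leave one free slot per possible vcpu.

/* Stand-alone illustration only; constants and indices are invented. */
#include <stdio.h>

#define RING_MAX 64	/* stand-in for KVM_COALESCED_MMIO_MAX */
#define NR_VCPUS 16	/* stand-in for KVM_MAX_VCPUS */

/* Free slots between producer index `last` and consumer index `first`.
 * RING_MAX is added before the subtraction so the unsigned arithmetic
 * cannot underflow; both indices are assumed to stay below RING_MAX. */
static unsigned int ring_avail(unsigned int first, unsigned int last)
{
	return (first + RING_MAX - last - 1) % RING_MAX;
}

int main(void)
{
	unsigned int first = 3, last = 60;	/* example indices */

	/* With only a per-device spinlock around the write path, all vcpus
	 * may pass this check before any of them queues its entry, so the
	 * check reserves NR_VCPUS free slots rather than a single one. */
	if (ring_avail(first, last) < NR_VCPUS)
		printf("ring treated as full: fall back to a normal MMIO exit\n");
	else
		printf("write can be coalesced into the ring\n");
	return 0;
}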