path: root/virt/kvm/coalesced_mmio.c
author		Sasha Levin <levinsasha928@gmail.com>	2011-07-18 10:17:14 -0400
committer	Avi Kivity <avi@redhat.com>	2011-09-25 12:17:18 -0400
commit		c298125f4bc30fdbe4b7c33460ef57271cc51a7d (patch)
tree		a553f1507496afae35c2a7eaefb5a45be25738e0 /virt/kvm/coalesced_mmio.c
parent		22388a3c8ce2a2a004ce764194cce8a2f9b13d66 (diff)
KVM: MMIO: Lock coalesced device when checking for available entry
Move the check for available entries to within the spinlock. This allows
working with a larger number of VCPUs and reduces premature exits when
many VCPUs are in use.

Cc: Avi Kivity <avi@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
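To see why the availability check has to sit inside the lock, here is a
minimal user-space sketch of the same pattern. The names are illustrative
stand-ins, not the kernel's API: mmio_ring, RING_MAX and ring_push play the
roles of kvm_coalesced_mmio_ring, KVM_COALESCED_MMIO_MAX and
coalesced_mmio_write, and a pthread mutex stands in for the spinlock. With
the check outside the lock, several writers can all observe the same free
slot and overrun the ring; with the check inside, the slot a writer sees is
the slot it fills.

/*
 * Sketch only: hypothetical names, pthread mutex in place of the
 * kernel spinlock.
 */
#include <pthread.h>
#include <stdint.h>

#define RING_MAX 64			/* power of two, so the unsigned
					 * wraparound below stays exact */

struct mmio_ring {
	uint32_t first;			/* consumer index */
	uint32_t last;			/* producer index: first free entry */
	uint64_t entry[RING_MAX];
};

static struct mmio_ring ring;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* One slot is always left unused so that first == last means "empty". */
static int ring_has_room(const struct mmio_ring *r)
{
	return ((r->first - r->last - 1) % RING_MAX) != 0;
}

int ring_push(uint64_t val)
{
	pthread_mutex_lock(&ring_lock);

	/*
	 * The check runs under the same lock that protects 'last', so
	 * the free slot seen here cannot be claimed by another writer
	 * before we fill it. An unlocked check lets several threads
	 * pass at once, which is why the old code had to reserve
	 * KVM_MAX_VCPUS slots of slack.
	 */
	if (!ring_has_room(&ring)) {
		pthread_mutex_unlock(&ring_lock);
		return -1;		/* full: caller takes a slow path */
	}

	ring.entry[ring.last] = val;
	ring.last = (ring.last + 1) % RING_MAX;

	pthread_mutex_unlock(&ring_lock);
	return 0;
}

Leaving one entry unused keeps "empty" (first == last) distinguishable from
"full"; making RING_MAX a power of two keeps the unsigned subtraction in
ring_has_room correct even when the indices wrap.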
Diffstat (limited to 'virt/kvm/coalesced_mmio.c')
-rw-r--r--	virt/kvm/coalesced_mmio.c	42
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index fc8487564d1f..ae075dc0890d 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -25,23 +25,8 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 				   gpa_t addr, int len)
 {
 	struct kvm_coalesced_mmio_zone *zone;
-	struct kvm_coalesced_mmio_ring *ring;
-	unsigned avail;
 	int i;
 
-	/* Are we able to batch it ? */
-
-	/* last is the first free entry
-	 * check if we don't meet the first used entry
-	 * there is always one unused entry in the buffer
-	 */
-	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
-	if (avail < KVM_MAX_VCPUS) {
-		/* full */
-		return 0;
-	}
-
 	/* is it in a batchable area ? */
 
 	for (i = 0; i < dev->nb_zones; i++) {
@@ -58,16 +43,43 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 0;
 }
 
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+{
+	struct kvm_coalesced_mmio_ring *ring;
+	unsigned avail;
+
+	/* Are we able to batch it ? */
+
+	/* last is the first free entry
+	 * check if we don't meet the first used entry
+	 * there is always one unused entry in the buffer
+	 */
+	ring = dev->kvm->coalesced_mmio_ring;
+	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	if (avail == 0) {
+		/* full */
+		return 0;
+	}
+
+	return 1;
+}
+
 static int coalesced_mmio_write(struct kvm_io_device *this,
 				gpa_t addr, int len, const void *val)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->lock);
 
+	if (!coalesced_mmio_has_room(dev)) {
+		spin_unlock(&dev->lock);
+		return -EOPNOTSUPP;
+	}
+
 	/* copy data in first free entry of the ring */
 
 	ring->coalesced_mmio[ring->last].phys_addr = addr;