path: root/virt/kvm/coalesced_mmio.c
Diffstat (limited to 'virt/kvm/coalesced_mmio.c')
 virt/kvm/coalesced_mmio.c | 74 ++++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 33 deletions(-)
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 5ae620d32fac..04d69cd7049b 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -14,32 +14,28 @@
 
 #include "coalesced_mmio.h"
 
-static int coalesced_mmio_in_range(struct kvm_io_device *this,
-                                   gpa_t addr, int len, int is_write)
+static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
+{
+        return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
+}
+
+static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
+                                   gpa_t addr, int len)
 {
-        struct kvm_coalesced_mmio_dev *dev =
-                                (struct kvm_coalesced_mmio_dev*)this->private;
         struct kvm_coalesced_mmio_zone *zone;
-        int next;
+        struct kvm_coalesced_mmio_ring *ring;
+        unsigned avail;
         int i;
 
-        if (!is_write)
-                return 0;
-
-        /* kvm->lock is taken by the caller and must be not released before
-         * dev.read/write
-         */
-
         /* Are we able to batch it ? */
 
         /* last is the first free entry
          * check if we don't meet the first used entry
          * there is always one unused entry in the buffer
          */
-
-        next = (dev->kvm->coalesced_mmio_ring->last + 1) %
-                                                        KVM_COALESCED_MMIO_MAX;
-        if (next == dev->kvm->coalesced_mmio_ring->first) {
+        ring = dev->kvm->coalesced_mmio_ring;
+        avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+        if (avail < KVM_MAX_VCPUS) {
                 /* full */
                 return 0;
         }
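
Note on the rewritten range check: instead of probing whether a single slot is free, it computes the ring's free space in one step, avail = (first - last - 1) % KVM_COALESCED_MMIO_MAX, and refuses the access unless at least KVM_MAX_VCPUS entries remain, which appears intended to leave room for other vcpus that may have passed the same check concurrently. A minimal userspace sketch of the unsigned wraparound arithmetic follows; RING_MAX and ring_avail are stand-ins invented for illustration, not kernel symbols:

#include <assert.h>
#include <stdio.h>

#define RING_MAX 64U   /* stand-in for KVM_COALESCED_MMIO_MAX; a power of two */

/*
 * Free slots in a ring that always keeps one entry unused.  Unsigned
 * subtraction wraps modulo 2^32, and because RING_MAX divides 2^32 the
 * modulo gives the right answer even when first <= last.
 */
static unsigned ring_avail(unsigned first, unsigned last)
{
        return (first - last - 1) % RING_MAX;
}

int main(void)
{
        assert(ring_avail(0, 0) == RING_MAX - 1);   /* empty ring */
        assert(ring_avail(5, 4) == 0);              /* full: last right behind first */
        assert(ring_avail(0, RING_MAX - 1) == 0);   /* full across the wrap point */
        printf("ring_avail checks passed\n");
        return 0;
}

The one-unused-entry convention is what makes first == last unambiguously mean "empty" rather than "full".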
@@ -60,14 +56,15 @@ static int coalesced_mmio_in_range(struct kvm_io_device *this,
         return 0;
 }
 
-static void coalesced_mmio_write(struct kvm_io_device *this,
-                                 gpa_t addr, int len, const void *val)
+static int coalesced_mmio_write(struct kvm_io_device *this,
+                                gpa_t addr, int len, const void *val)
 {
-        struct kvm_coalesced_mmio_dev *dev =
-                                (struct kvm_coalesced_mmio_dev*)this->private;
+        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
         struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+        if (!coalesced_mmio_in_range(dev, addr, len))
+                return -EOPNOTSUPP;
 
-        /* kvm->lock must be taken by caller before call to in_range()*/
+        spin_lock(&dev->lock);
 
         /* copy data in first free entry of the ring */
 
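
With this hunk the write handler performs its own range check and returns -EOPNOTSUPP when the access falls outside every coalesced zone, so the caller can fall through to other devices instead of relying on a separate in_range callback. A sketch of how a bus might use that return value; the io_device type and bus_write helper here are invented for illustration and are not KVM's actual bus code:

#include <errno.h>

struct io_device {
        int (*write)(struct io_device *dev, unsigned long addr,
                     int len, const void *val);
};

/* Try each device in turn until one claims the access. */
static int bus_write(struct io_device **devs, int ndevs,
                     unsigned long addr, int len, const void *val)
{
        int i, ret;

        for (i = 0; i < ndevs; i++) {
                ret = devs[i]->write(devs[i], addr, len, val);
                if (ret != -EOPNOTSUPP)
                        return ret;     /* claimed (or genuinely failed) */
        }
        return -EOPNOTSUPP;             /* nobody claimed it */
}

The per-device spinlock that replaces the comment about kvm->lock also means the ring is now protected by dev->lock alone rather than by a lock the caller was trusted to hold.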
@@ -76,29 +73,40 @@ static void coalesced_mmio_write(struct kvm_io_device *this,
         memcpy(ring->coalesced_mmio[ring->last].data, val, len);
         smp_wmb();
         ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+        spin_unlock(&dev->lock);
+        return 0;
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
-        kfree(this);
+        struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
+
+        kfree(dev);
 }
 
+static const struct kvm_io_device_ops coalesced_mmio_ops = {
+        .write      = coalesced_mmio_write,
+        .destructor = coalesced_mmio_destructor,
+};
+
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
         struct kvm_coalesced_mmio_dev *dev;
+        int ret;
 
         dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
         if (!dev)
                 return -ENOMEM;
-        dev->dev.write  = coalesced_mmio_write;
-        dev->dev.in_range  = coalesced_mmio_in_range;
-        dev->dev.destructor  = coalesced_mmio_destructor;
-        dev->dev.private  = dev;
+        spin_lock_init(&dev->lock);
+        kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
         dev->kvm = kvm;
         kvm->coalesced_mmio_dev = dev;
-        kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);
 
-        return 0;
+        ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
+        if (ret < 0)
+                kfree(dev);
+
+        return ret;
 }
 
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
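
The new to_mmio() helper replaces the old void-pointer cast through dev->private with container_of(), recovering the wrapping structure from a pointer to its embedded kvm_io_device, while the per-callback assignments give way to a shared const ops table. A self-contained userspace rendition of the container_of pattern; all names below are illustrative:

#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of(): step back from
 * a member pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct io_device {
        const char *name;
};

struct mmio_dev {
        int nb_zones;
        struct io_device dev;   /* embedded by value, not a pointer */
};

int main(void)
{
        struct mmio_dev m = { .nb_zones = 3, .dev = { .name = "coalesced" } };
        struct io_device *this = &m.dev;

        /* Same shape as to_mmio() in the patch above. */
        struct mmio_dev *back = container_of(this, struct mmio_dev, dev);

        printf("%s: %d zones\n", back->dev.name, back->nb_zones);
        return 0;
}

This also fixes a latent bug in the destructor: kfree(this) freed the embedded member's address, which only happened to equal the allocation because dev was the first field; kfree(to_mmio(this)) frees the actual allocation regardless of layout.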
@@ -109,16 +117,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
         if (dev == NULL)
                 return -EINVAL;
 
-        mutex_lock(&kvm->lock);
+        down_write(&kvm->slots_lock);
         if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-                mutex_unlock(&kvm->lock);
+                up_write(&kvm->slots_lock);
                 return -ENOBUFS;
         }
 
         dev->zone[dev->nb_zones] = *zone;
         dev->nb_zones++;
 
-        mutex_unlock(&kvm->lock);
+        up_write(&kvm->slots_lock);
         return 0;
 }
 
@@ -132,7 +140,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
         if (dev == NULL)
                 return -EINVAL;
 
-        mutex_lock(&kvm->lock);
+        down_write(&kvm->slots_lock);
 
         i = dev->nb_zones;
         while(i) {
@@ -150,7 +158,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                 i--;
         }
 
-        mutex_unlock(&kvm->lock);
+        up_write(&kvm->slots_lock);
 
         return 0;
 }
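
Finally, the zone registration ioctls move from kvm->lock to the write side of the slots_lock rw-semaphore, so zone updates exclude concurrent readers of bus state without serializing against everything kvm->lock covers. A rough userspace analogue of that reader/writer split, with a pthread rwlock standing in for the kernel rw_semaphore (sketch only; the kernel APIs and the data they guard differ):

#include <pthread.h>

static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nb_zones;

static void register_zone(void)
{
        pthread_rwlock_wrlock(&slots_lock);     /* writers exclude everyone */
        nb_zones++;
        pthread_rwlock_unlock(&slots_lock);
}

static int count_zones(void)
{
        int n;

        pthread_rwlock_rdlock(&slots_lock);     /* readers may run concurrently */
        n = nb_zones;
        pthread_rwlock_unlock(&slots_lock);
        return n;
}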