Diffstat (limited to 'virt')

-rw-r--r--	virt/kvm/coalesced_mmio.c | 118
-rw-r--r--	virt/kvm/coalesced_mmio.h |   7
2 files changed, 50 insertions(+), 75 deletions(-)
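The gist of the change: instead of a single kvm_coalesced_mmio_dev holding a fixed array of up to 100 zones behind its own spinlock, every registered zone becomes its own device on the MMIO bus, strung onto a kvm->coalesced_zones list, with one VM-wide kvm->ring_lock guarding the shared ring. For orientation, this is roughly how userspace exercises these handlers; the ioctls and the zone struct are real UAPI, while vm_fd and the addresses are made up for illustration:

	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xe0000000,	/* illustrative guest-physical range */
		.size = 0x1000,
	};

	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
		perror("KVM_REGISTER_COALESCED_MMIO");
	/* ... and later, with the same zone: */
	if (ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone) < 0)
		perror("KVM_UNREGISTER_COALESCED_MMIO");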
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index ae075dc0890d..2316ec1aadc4 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -24,23 +24,13 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 				   gpa_t addr, int len)
 {
-	struct kvm_coalesced_mmio_zone *zone;
-	int i;
-
-	/* is it in a batchable area ? */
-
-	for (i = 0; i < dev->nb_zones; i++) {
-		zone = &dev->zone[i];
-
-		/* (addr,len) is fully included in
-		 * (zone->addr, zone->size)
-		 */
+	/* is it in a batchable area ?
+	 * (addr,len) is fully included in
+	 * (zone->addr, zone->size)
+	 */
 
-		if (zone->addr <= addr &&
-		    addr + len <= zone->addr + zone->size)
-			return 1;
-	}
-	return 0;
+	return (dev->zone.addr <= addr &&
+		addr + len <= dev->zone.addr + dev->zone.size);
 }
 
 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
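The per-device zone array is gone, so the loop collapses into one interval-containment test against the device's single zone. A standalone restatement with concrete numbers (illustrative, not part of the patch):

	/* a write (addr, len) is batchable iff it lies fully inside the zone */
	static int in_zone(__u64 zaddr, __u32 zsize, __u64 addr, int len)
	{
		return zaddr <= addr && addr + len <= zaddr + zsize;
	}

	/* zone [0x1000, 0x1100): */
	in_zone(0x1000, 0x100, 0x10f0, 0x10);	/* 1: 0x10f0+0x10 == 0x1100 <= 0x1100 */
	in_zone(0x1000, 0x100, 0x10f8, 0x10);	/* 0: 0x10f8+0x10 == 0x1108 >  0x1100 */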
@@ -73,10 +63,10 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
-	spin_lock(&dev->lock);
+	spin_lock(&dev->kvm->ring_lock);
 
 	if (!coalesced_mmio_has_room(dev)) {
-		spin_unlock(&dev->lock);
+		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
@@ -87,7 +77,7 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
-	spin_unlock(&dev->lock);
+	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
 
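Both halves of coalesced_mmio_write() now serialize on the VM-wide kvm->ring_lock instead of a per-device lock, which is what keeps it safe for many zone devices to append to the one shared kvm->coalesced_mmio_ring; smp_wmb() still publishes the entry before advancing `last`. The matching consumer runs in userspace against the mmap'ed ring page; a rough sketch of a VMM flush loop, where handle_mmio() and the barrier macro are stand-ins rather than a real API:

	void flush_coalesced(struct kvm_coalesced_mmio_ring *ring)
	{
		while (ring->first != ring->last) {
			struct kvm_coalesced_mmio *ent =
				&ring->coalesced_mmio[ring->first];

			handle_mmio(ent->phys_addr, ent->data, ent->len);
			smp_wmb();	/* finish with the entry before freeing its slot */
			ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
		}
	}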
@@ -95,6 +85,8 @@ static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 
+	list_del(&dev->list);
+
 	kfree(dev);
 }
 
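The destructor now also unlinks the device from kvm->coalesced_zones before freeing it. No lock is taken here; as the comment added to kvm_coalesced_mmio_init() below spells out, the list is only modified with kvm->slots_lock held.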
@@ -105,7 +97,6 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
 
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
-	struct kvm_coalesced_mmio_dev *dev;
 	struct page *page;
 	int ret;
 
@@ -113,31 +104,18 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		goto out_err;
-	kvm->coalesced_mmio_ring = page_address(page);
 
-	ret = -ENOMEM;
-	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
-	if (!dev)
-		goto out_free_page;
-	spin_lock_init(&dev->lock);
-	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
-	dev->kvm = kvm;
-	kvm->coalesced_mmio_dev = dev;
-
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret < 0)
-		goto out_free_dev;
+	ret = 0;
+	kvm->coalesced_mmio_ring = page_address(page);
 
-	return ret;
+	/*
+	 * We're using this spinlock to sync access to the coalesced ring.
+	 * The list doesn't need it's own lock since device registration and
+	 * unregistration should only happen when kvm->slots_lock is held.
+	 */
+	spin_lock_init(&kvm->ring_lock);
+	INIT_LIST_HEAD(&kvm->coalesced_zones);
 
-out_free_dev:
-	kvm->coalesced_mmio_dev = NULL;
-	kfree(dev);
-out_free_page:
-	kvm->coalesced_mmio_ring = NULL;
-	__free_page(page);
 out_err:
 	return ret;
 }
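With device creation moved to registration time, init only allocates the shared ring page, initializes the ring lock, and prepares the empty zone list. The two fields it initializes, ring_lock and coalesced_zones, live in struct kvm; they are added by this same commit outside virt/, and therefore outside this diffstat-limited view — presumably along these lines:

	/* presumed companion hunk in include/linux/kvm_host.h */
	struct kvm {
		/* ... */
	#ifdef CONFIG_KVM_MMIO
		struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
		spinlock_t ring_lock;
		struct list_head coalesced_zones;
	#endif
		/* ... */
	};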
@@ -151,51 +129,49 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
 					 struct kvm_coalesced_mmio_zone *zone)
 {
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+	int ret;
+	struct kvm_coalesced_mmio_dev *dev;
 
-	if (dev == NULL)
-		return -ENXIO;
+	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+	dev->kvm = kvm;
+	dev->zone = *zone;
 
 	mutex_lock(&kvm->slots_lock);
-	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-		mutex_unlock(&kvm->slots_lock);
-		return -ENOBUFS;
-	}
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+	if (ret < 0)
+		goto out_free_dev;
+	list_add_tail(&dev->list, &kvm->coalesced_zones);
+	mutex_unlock(&kvm->slots_lock);
 
-	dev->zone[dev->nb_zones] = *zone;
-	dev->nb_zones++;
+	return ret;
 
+out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
+
+	kfree(dev);
+
+	if (dev == NULL)
+		return -ENXIO;
+
 	return 0;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 					   struct kvm_coalesced_mmio_zone *zone)
 {
-	int i;
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-	struct kvm_coalesced_mmio_zone *z;
-
-	if (dev == NULL)
-		return -ENXIO;
+	struct kvm_coalesced_mmio_dev *dev, *tmp;
 
 	mutex_lock(&kvm->slots_lock);
 
-	i = dev->nb_zones;
-	while (i) {
-		z = &dev->zone[i - 1];
-
-		/* unregister all zones
-		 * included in (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= z->addr &&
-		    z->addr + z->size <= zone->addr + zone->size) {
-			dev->nb_zones--;
-			*z = dev->zone[dev->nb_zones];
-		}
-		i--;
-	}
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+			kvm_iodevice_destructor(&dev->dev);
+		}
 
 	mutex_unlock(&kvm->slots_lock);
 
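Two notes on this hunk. First, unregistration walks the list with list_for_each_entry_safe() because kvm_iodevice_destructor() lands in coalesced_mmio_destructor(), which list_del()s and kfree()s the very node being visited, so the iterator must cache the next node first. Second, the containment test quietly changes direction: the old loop removed registered zones included in the request, while coalesced_mmio_in_range(dev, zone->addr, zone->size) matches zones that include the request. Worked example with a registered zone [0x1000, 0x2000):

	request [0x0000, 0x8000): old code removes the zone
	    (0x0000 <= 0x1000 && 0x2000 <= 0x8000); the new test fails
	    (0x1000 <= 0x0000 is false), so the zone survives;
	request [0x1000, 0x2000): removed by both, since an exact match
	    satisfies containment in either direction.

In practice userspace unregisters exactly the zone it registered, where the two agree. Finally, the out_free_dev path looks like leftover scaffolding: dev cannot be NULL after the kzalloc() succeeded, so the -ENXIO branch is dead, and a failed bus registration falls through to return 0 rather than propagating ret.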
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 8a5959e3535f..b280c20444d1 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,14 +12,13 @@
 
 #ifdef CONFIG_KVM_MMIO
 
-#define KVM_COALESCED_MMIO_ZONE_MAX 100
+#include <linux/list.h>
 
 struct kvm_coalesced_mmio_dev {
+	struct list_head list;
 	struct kvm_io_device dev;
 	struct kvm *kvm;
-	spinlock_t lock;
-	int nb_zones;
-	struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
+	struct kvm_coalesced_mmio_zone zone;
 };
 
 int kvm_coalesced_mmio_init(struct kvm *kvm);
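With the fixed array gone, the zone count is no longer capped by KVM_COALESCED_MMIO_ZONE_MAX (100) but by how many devices the MMIO bus will accept. For reference, the userspace-visible descriptor that the new dev->zone field embeds (from the KVM UAPI header, unchanged by this patch):

	struct kvm_coalesced_mmio_zone {
		__u64 addr;
		__u32 size;
		__u32 pad;
	};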