Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/x86.c  86
1 files changed, 45 insertions, 41 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c8a2793626ec..61eddbeabeb4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -166,6 +166,43 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 	enable_irq(dev->host_irq);
 }
 
+static void kvm_free_assigned_device(struct kvm *kvm,
+				     struct kvm_assigned_dev_kernel
+				     *assigned_dev)
+{
+	if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
+		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+
+	if (cancel_work_sync(&assigned_dev->interrupt_work))
+		/* We had pending work. That means we will have to take
+		 * care of kvm_put_kvm.
+		 */
+		kvm_put_kvm(kvm);
+
+	pci_release_regions(assigned_dev->dev);
+	pci_disable_device(assigned_dev->dev);
+	pci_dev_put(assigned_dev->dev);
+
+	list_del(&assigned_dev->list);
+	kfree(assigned_dev);
+}
+
+static void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+	struct list_head *ptr, *ptr2;
+	struct kvm_assigned_dev_kernel *assigned_dev;
+
+	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+		assigned_dev = list_entry(ptr,
+					  struct kvm_assigned_dev_kernel,
+					  list);
+
+		kvm_free_assigned_device(kvm, assigned_dev);
+	}
+}
+
 static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 				   struct kvm_assigned_irq
 				   *assigned_irq)
@@ -194,8 +231,8 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 
 	if (irqchip_in_kernel(kvm)) {
 		if (!capable(CAP_SYS_RAWIO)) {
-			return -EPERM;
-			goto out;
+			r = -EPERM;
+			goto out_release;
 		}
 
 		if (assigned_irq->host_irq)
@@ -214,17 +251,18 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 		 */
 		if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
 				"kvm_assigned_device", (void *)match)) {
-			printk(KERN_INFO "%s: couldn't allocate irq for pv "
-			       "device\n", __func__);
 			r = -EIO;
-			goto out;
+			goto out_release;
 		}
 	}
 
 	match->irq_requested = true;
-out:
 	mutex_unlock(&kvm->lock);
 	return r;
+out_release:
+	mutex_unlock(&kvm->lock);
+	kvm_free_assigned_device(kvm, match);
+	return r;
 }
 
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
@@ -300,40 +338,6 @@ out_free:
 	return r;
 }
 
-static void kvm_free_assigned_devices(struct kvm *kvm)
-{
-	struct list_head *ptr, *ptr2;
-	struct kvm_assigned_dev_kernel *assigned_dev;
-
-	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
-		assigned_dev = list_entry(ptr,
-					  struct kvm_assigned_dev_kernel,
-					  list);
-
-		if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested) {
-			free_irq(assigned_dev->host_irq,
-				 (void *)assigned_dev);
-
-			kvm_unregister_irq_ack_notifier(kvm,
-							&assigned_dev->
-							ack_notifier);
-		}
-
-		if (cancel_work_sync(&assigned_dev->interrupt_work))
-			/* We had pending work. That means we will have to take
-			 * care of kvm_put_kvm.
-			 */
-			kvm_put_kvm(kvm);
-
-		pci_release_regions(assigned_dev->dev);
-		pci_disable_device(assigned_dev->dev);
-		pci_dev_put(assigned_dev->dev);
-
-		list_del(&assigned_dev->list);
-		kfree(assigned_dev);
-	}
-}
-
 unsigned long segment_base(u16 selector)
 {
 	struct descriptor_table gdt;
@@ -4296,7 +4300,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_iommu_unmap_guest(kvm);
-	kvm_free_assigned_devices(kvm);
+	kvm_free_all_assigned_devices(kvm);
 	kvm_free_pit(kvm);
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);