about · summary · refs · log · tree · commit · diff · stats
path: root/arch
diff options
context:
space:
mode:
authorXiantao Zhang <xiantao.zhang@intel.com>2008-10-06 01:47:38 -0400
committerAvi Kivity <avi@redhat.com>2008-10-15 08:25:33 -0400
commit8a98f6648a2b0756d8f26d6c13332f5526355fec (patch)
tree8df03d146d22082644df8078747e332f9b2aff1f /arch
parent371c01b28e4049d1fbf60a9631cdad98f7cae030 (diff)
KVM: Move device assignment logic to common code
To share with other archs, this patch moves device assignment logic to common parts.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kvm/x86.c255
1 file changed, 0 insertions, 255 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d6d7123d2644..f8bde01ba8e6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -30,7 +30,6 @@
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/kvm.h> 31#include <linux/kvm.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/pci.h>
34#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
35#include <linux/module.h> 34#include <linux/module.h>
36#include <linux/mman.h> 35#include <linux/mman.h>
@@ -107,238 +106,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
107 { NULL } 106 { NULL }
108}; 107};
109 108
110static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
111 int assigned_dev_id)
112{
113 struct list_head *ptr;
114 struct kvm_assigned_dev_kernel *match;
115
116 list_for_each(ptr, head) {
117 match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
118 if (match->assigned_dev_id == assigned_dev_id)
119 return match;
120 }
121 return NULL;
122}
123
124static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
125{
126 struct kvm_assigned_dev_kernel *assigned_dev;
127
128 assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
129 interrupt_work);
130
131 /* This is taken to safely inject irq inside the guest. When
132 * the interrupt injection (or the ioapic code) uses a
133 * finer-grained lock, update this
134 */
135 mutex_lock(&assigned_dev->kvm->lock);
136 kvm_set_irq(assigned_dev->kvm,
137 assigned_dev->guest_irq, 1);
138 mutex_unlock(&assigned_dev->kvm->lock);
139 kvm_put_kvm(assigned_dev->kvm);
140}
141
142/* FIXME: Implement the OR logic needed to make shared interrupts on
143 * this line behave properly
144 */
145static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
146{
147 struct kvm_assigned_dev_kernel *assigned_dev =
148 (struct kvm_assigned_dev_kernel *) dev_id;
149
150 kvm_get_kvm(assigned_dev->kvm);
151 schedule_work(&assigned_dev->interrupt_work);
152 disable_irq_nosync(irq);
153 return IRQ_HANDLED;
154}
155
156/* Ack the irq line for an assigned device */
157static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
158{
159 struct kvm_assigned_dev_kernel *dev;
160
161 if (kian->gsi == -1)
162 return;
163
164 dev = container_of(kian, struct kvm_assigned_dev_kernel,
165 ack_notifier);
166 kvm_set_irq(dev->kvm, dev->guest_irq, 0);
167 enable_irq(dev->host_irq);
168}
169
170static void kvm_free_assigned_device(struct kvm *kvm,
171 struct kvm_assigned_dev_kernel
172 *assigned_dev)
173{
174 if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
175 free_irq(assigned_dev->host_irq, (void *)assigned_dev);
176
177 kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
178
179 if (cancel_work_sync(&assigned_dev->interrupt_work))
180 /* We had pending work. That means we will have to take
181 * care of kvm_put_kvm.
182 */
183 kvm_put_kvm(kvm);
184
185 pci_release_regions(assigned_dev->dev);
186 pci_disable_device(assigned_dev->dev);
187 pci_dev_put(assigned_dev->dev);
188
189 list_del(&assigned_dev->list);
190 kfree(assigned_dev);
191}
192
193static void kvm_free_all_assigned_devices(struct kvm *kvm)
194{
195 struct list_head *ptr, *ptr2;
196 struct kvm_assigned_dev_kernel *assigned_dev;
197
198 list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
199 assigned_dev = list_entry(ptr,
200 struct kvm_assigned_dev_kernel,
201 list);
202
203 kvm_free_assigned_device(kvm, assigned_dev);
204 }
205}
206
207static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
208 struct kvm_assigned_irq
209 *assigned_irq)
210{
211 int r = 0;
212 struct kvm_assigned_dev_kernel *match;
213
214 mutex_lock(&kvm->lock);
215
216 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
217 assigned_irq->assigned_dev_id);
218 if (!match) {
219 mutex_unlock(&kvm->lock);
220 return -EINVAL;
221 }
222
223 if (match->irq_requested) {
224 match->guest_irq = assigned_irq->guest_irq;
225 match->ack_notifier.gsi = assigned_irq->guest_irq;
226 mutex_unlock(&kvm->lock);
227 return 0;
228 }
229
230 INIT_WORK(&match->interrupt_work,
231 kvm_assigned_dev_interrupt_work_handler);
232
233 if (irqchip_in_kernel(kvm)) {
234 if (!capable(CAP_SYS_RAWIO)) {
235 r = -EPERM;
236 goto out_release;
237 }
238
239 if (assigned_irq->host_irq)
240 match->host_irq = assigned_irq->host_irq;
241 else
242 match->host_irq = match->dev->irq;
243 match->guest_irq = assigned_irq->guest_irq;
244 match->ack_notifier.gsi = assigned_irq->guest_irq;
245 match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
246 kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
247
248 /* Even though this is PCI, we don't want to use shared
249 * interrupts. Sharing host devices with guest-assigned devices
250 * on the same interrupt line is not a happy situation: there
251 * are going to be long delays in accepting, acking, etc.
252 */
253 if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
254 "kvm_assigned_device", (void *)match)) {
255 r = -EIO;
256 goto out_release;
257 }
258 }
259
260 match->irq_requested = true;
261 mutex_unlock(&kvm->lock);
262 return r;
263out_release:
264 mutex_unlock(&kvm->lock);
265 kvm_free_assigned_device(kvm, match);
266 return r;
267}
268
269static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
270 struct kvm_assigned_pci_dev *assigned_dev)
271{
272 int r = 0;
273 struct kvm_assigned_dev_kernel *match;
274 struct pci_dev *dev;
275
276 mutex_lock(&kvm->lock);
277
278 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
279 assigned_dev->assigned_dev_id);
280 if (match) {
281 /* device already assigned */
282 r = -EINVAL;
283 goto out;
284 }
285
286 match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
287 if (match == NULL) {
288 printk(KERN_INFO "%s: Couldn't allocate memory\n",
289 __func__);
290 r = -ENOMEM;
291 goto out;
292 }
293 dev = pci_get_bus_and_slot(assigned_dev->busnr,
294 assigned_dev->devfn);
295 if (!dev) {
296 printk(KERN_INFO "%s: host device not found\n", __func__);
297 r = -EINVAL;
298 goto out_free;
299 }
300 if (pci_enable_device(dev)) {
301 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
302 r = -EBUSY;
303 goto out_put;
304 }
305 r = pci_request_regions(dev, "kvm_assigned_device");
306 if (r) {
307 printk(KERN_INFO "%s: Could not get access to device regions\n",
308 __func__);
309 goto out_disable;
310 }
311 match->assigned_dev_id = assigned_dev->assigned_dev_id;
312 match->host_busnr = assigned_dev->busnr;
313 match->host_devfn = assigned_dev->devfn;
314 match->dev = dev;
315
316 match->kvm = kvm;
317
318 list_add(&match->list, &kvm->arch.assigned_dev_head);
319
320 if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
321 r = kvm_iommu_map_guest(kvm, match);
322 if (r)
323 goto out_list_del;
324 }
325
326out:
327 mutex_unlock(&kvm->lock);
328 return r;
329out_list_del:
330 list_del(&match->list);
331 pci_release_regions(dev);
332out_disable:
333 pci_disable_device(dev);
334out_put:
335 pci_dev_put(dev);
336out_free:
337 kfree(match);
338 mutex_unlock(&kvm->lock);
339 return r;
340}
341
342unsigned long segment_base(u16 selector) 109unsigned long segment_base(u16 selector)
343{ 110{
344 struct descriptor_table gdt; 111 struct descriptor_table gdt;
@@ -2030,28 +1797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
2030 goto out; 1797 goto out;
2031 break; 1798 break;
2032 } 1799 }
2033 case KVM_ASSIGN_PCI_DEVICE: {
2034 struct kvm_assigned_pci_dev assigned_dev;
2035
2036 r = -EFAULT;
2037 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2038 goto out;
2039 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
2040 if (r)
2041 goto out;
2042 break;
2043 }
2044 case KVM_ASSIGN_IRQ: {
2045 struct kvm_assigned_irq assigned_irq;
2046
2047 r = -EFAULT;
2048 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2049 goto out;
2050 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
2051 if (r)
2052 goto out;
2053 break;
2054 }
2055 case KVM_GET_PIT: { 1800 case KVM_GET_PIT: {
2056 r = -EFAULT; 1801 r = -EFAULT;
2057 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 1802 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))