Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/assigned-dev.c	209
1 file changed, 180 insertions, 29 deletions
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index ece80612b594..08e05715df72 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -55,22 +55,66 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 	return index;
 }
 
-static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
+static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
 {
 	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	int ret;
 
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
-		spin_lock(&assigned_dev->intx_lock);
+	spin_lock(&assigned_dev->intx_lock);
+	if (pci_check_and_mask_intx(assigned_dev->dev)) {
+		assigned_dev->host_irq_disabled = true;
+		ret = IRQ_WAKE_THREAD;
+	} else
+		ret = IRQ_NONE;
+	spin_unlock(&assigned_dev->intx_lock);
+
+	return ret;
+}
+
+static void
+kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
+				 int vector)
+{
+	if (unlikely(assigned_dev->irq_requested_type &
+		     KVM_DEV_IRQ_GUEST_INTX)) {
+		mutex_lock(&assigned_dev->intx_mask_lock);
+		if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
+			kvm_set_irq(assigned_dev->kvm,
+				    assigned_dev->irq_source_id, vector, 1);
+		mutex_unlock(&assigned_dev->intx_mask_lock);
+	} else
+		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+			    vector, 1);
+}
+
+static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+
+	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
+		spin_lock_irq(&assigned_dev->intx_lock);
 		disable_irq_nosync(irq);
 		assigned_dev->host_irq_disabled = true;
-		spin_unlock(&assigned_dev->intx_lock);
+		spin_unlock_irq(&assigned_dev->intx_lock);
 	}
 
-	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-		    assigned_dev->guest_irq, 1);
+	kvm_assigned_dev_raise_guest_irq(assigned_dev,
+					 assigned_dev->guest_irq);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+
+	kvm_assigned_dev_raise_guest_irq(assigned_dev,
+					 assigned_dev->guest_irq);
 
 	return IRQ_HANDLED;
 }
+#endif
 
 #ifdef __KVM_HAVE_MSIX
 static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
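
Note on the new hard handler above: with KVM_DEV_ASSIGN_PCI_2_3, kvm_assigned_dev_intx() runs in hard-IRQ context, silences the device via PCI 2.3 INTx masking, and returns IRQ_WAKE_THREAD so injection happens in kvm_assigned_dev_thread_intx(); IRQ_NONE tells the IRQ core that some other device on the shared line fired. A minimal sketch of what a check-and-mask helper does at config-space level — an illustration only, not the kernel's pci_check_and_mask_intx(), which performs both accesses atomically under the PCI config lock:

	#include <linux/pci.h>

	/* Sketch: mask INTx iff this device is the one asserting the line. */
	static bool intx_check_and_mask_sketch(struct pci_dev *pdev)
	{
		u16 status, cmd;

		pci_read_config_word(pdev, PCI_STATUS, &status);
		if (!(status & PCI_STATUS_INTERRUPT))
			return false;	/* not ours: another device on the line */

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		pci_write_config_word(pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_INTX_DISABLE);
		return true;		/* masked; wake the handler thread */
	}
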
@@ -81,8 +125,7 @@ static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
 
 	if (index >= 0) {
 		vector = assigned_dev->guest_msix_entries[index].vector;
-		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    vector, 1);
+		kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
 	}
 
 	return IRQ_HANDLED;
@@ -98,15 +141,31 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 
 	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
 
-	/* The guest irq may be shared so this ack may be
-	 * from another device.
-	 */
-	spin_lock(&dev->intx_lock);
-	if (dev->host_irq_disabled) {
-		enable_irq(dev->host_irq);
-		dev->host_irq_disabled = false;
+	mutex_lock(&dev->intx_mask_lock);
+
+	if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
+		bool reassert = false;
+
+		spin_lock_irq(&dev->intx_lock);
+		/*
+		 * The guest IRQ may be shared so this ack can come from an
+		 * IRQ for another guest device.
+		 */
+		if (dev->host_irq_disabled) {
+			if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
+				enable_irq(dev->host_irq);
+			else if (!pci_check_and_unmask_intx(dev->dev))
+				reassert = true;
+			dev->host_irq_disabled = reassert;
+		}
+		spin_unlock_irq(&dev->intx_lock);
+
+		if (reassert)
+			kvm_set_irq(dev->kvm, dev->irq_source_id,
+				    dev->guest_irq, 1);
 	}
-	spin_unlock(&dev->intx_lock);
+
+	mutex_unlock(&dev->intx_mask_lock);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
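
The reassert path above covers the case where pci_check_and_unmask_intx() refuses to unmask because the device asserted INTx again while masked: the line stays masked at device level, host_irq_disabled stays set, and the interrupt is reinjected into the guest instead. A hedged sketch of that check-before-unmask semantic (again ignoring the atomicity the real drivers/pci implementation provides):

	/* Sketch: unmask INTx unless the next interrupt is already pending. */
	static bool intx_check_and_unmask_sketch(struct pci_dev *pdev)
	{
		u16 status, cmd;

		pci_read_config_word(pdev, PCI_STATUS, &status);
		if (status & PCI_STATUS_INTERRUPT)
			return false;	/* pending: caller reasserts to the guest */

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		pci_write_config_word(pdev, PCI_COMMAND,
				      cmd & ~PCI_COMMAND_INTX_DISABLE);
		return true;
	}
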
@@ -154,7 +213,15 @@ static void deassign_host_irq(struct kvm *kvm,
 		pci_disable_msix(assigned_dev->dev);
 	} else {
 		/* Deal with MSI and INTx */
-		disable_irq(assigned_dev->host_irq);
+		if ((assigned_dev->irq_requested_type &
+		     KVM_DEV_IRQ_HOST_INTX) &&
+		    (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
+			spin_lock_irq(&assigned_dev->intx_lock);
+			pci_intx(assigned_dev->dev, false);
+			spin_unlock_irq(&assigned_dev->intx_lock);
+			synchronize_irq(assigned_dev->host_irq);
+		} else
+			disable_irq(assigned_dev->host_irq);
 
 		free_irq(assigned_dev->host_irq, assigned_dev);
 
@@ -235,15 +302,34 @@ void kvm_free_all_assigned_devices(struct kvm *kvm)
 static int assigned_device_enable_host_intx(struct kvm *kvm,
 					    struct kvm_assigned_dev_kernel *dev)
 {
+	irq_handler_t irq_handler;
+	unsigned long flags;
+
 	dev->host_irq = dev->dev->irq;
-	/* Even though this is PCI, we don't want to use shared
-	 * interrupts. Sharing host devices with guest-assigned devices
-	 * on the same interrupt line is not a happy situation: there
-	 * are going to be long delays in accepting, acking, etc.
+
+	/*
+	 * We can only share the IRQ line with other host devices if we are
+	 * able to disable the IRQ source at device-level - independently of
+	 * the guest driver. Otherwise host devices may suffer from unbounded
+	 * IRQ latencies when the guest keeps the line asserted.
 	 */
-	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-				 IRQF_ONESHOT, dev->irq_name, dev))
+	if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
+		irq_handler = kvm_assigned_dev_intx;
+		flags = IRQF_SHARED;
+	} else {
+		irq_handler = NULL;
+		flags = IRQF_ONESHOT;
+	}
+	if (request_threaded_irq(dev->host_irq, irq_handler,
+				 kvm_assigned_dev_thread_intx, flags,
+				 dev->irq_name, dev))
 		return -EIO;
+
+	if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
+		spin_lock_irq(&dev->intx_lock);
+		pci_intx(dev->dev, true);
+		spin_unlock_irq(&dev->intx_lock);
+	}
 	return 0;
 }
 
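
The new comment captures the trade-off behind the two registration shapes: a line may only be IRQF_SHARED if the hard handler can silence the device itself; without that, IRQF_ONESHOT keeps the line disabled until the threaded handler finishes, which rules out sharing. A self-contained sketch of the two modes, using hypothetical my_dev/my_hard/my_thread names rather than anything from this file:

	#include <linux/interrupt.h>

	struct my_dev {
		int irq;
	};

	static irqreturn_t my_hard(int irq, void *data)
	{
		/* quiesce the device at device level here, then ... */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t my_thread(int irq, void *data)
	{
		/* heavyweight work, runs in process context */
		return IRQ_HANDLED;
	}

	static int my_register(struct my_dev *dev, bool can_mask_at_device)
	{
		if (can_mask_at_device)
			/* hard handler masks the device, so sharing is safe */
			return request_threaded_irq(dev->irq, my_hard, my_thread,
						    IRQF_SHARED, "my-dev", dev);
		/* no hard handler: core keeps the line off until the thread ends */
		return request_threaded_irq(dev->irq, NULL, my_thread,
					    IRQF_ONESHOT, "my-dev", dev);
	}
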
@@ -260,8 +346,9 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
 	}
 
 	dev->host_irq = dev->dev->irq;
-	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-				 0, dev->irq_name, dev)) {
+	if (request_threaded_irq(dev->host_irq, NULL,
+				 kvm_assigned_dev_thread_msi, 0,
+				 dev->irq_name, dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -319,7 +406,6 @@ static int assigned_device_enable_guest_msi(struct kvm *kvm,
 {
 	dev->guest_irq = irq->guest_irq;
 	dev->ack_notifier.gsi = -1;
-	dev->host_irq_disabled = false;
 	return 0;
 }
 #endif
@@ -331,7 +417,6 @@ static int assigned_device_enable_guest_msix(struct kvm *kvm,
 {
 	dev->guest_irq = irq->guest_irq;
 	dev->ack_notifier.gsi = -1;
-	dev->host_irq_disabled = false;
 	return 0;
 }
 #endif
@@ -365,6 +450,7 @@ static int assign_host_irq(struct kvm *kvm,
 	default:
 		r = -EINVAL;
 	}
+	dev->host_irq_disabled = false;
 
 	if (!r)
 		dev->irq_requested_type |= host_irq_type;
@@ -466,6 +552,7 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
 {
 	int r = -ENODEV;
 	struct kvm_assigned_dev_kernel *match;
+	unsigned long irq_type;
 
 	mutex_lock(&kvm->lock);
 
@@ -474,7 +561,9 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
 	if (!match)
 		goto out;
 
-	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
+	irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
+					  KVM_DEV_IRQ_GUEST_MASK);
+	r = kvm_deassign_irq(kvm, match, irq_type);
 out:
 	mutex_unlock(&kvm->lock);
 	return r;
@@ -607,6 +696,10 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	if (!match->pci_saved_state)
 		printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
 		       __func__, dev_name(&dev->dev));
+
+	if (!pci_intx_mask_supported(dev))
+		assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;
+
 	match->assigned_dev_id = assigned_dev->assigned_dev_id;
 	match->host_segnr = assigned_dev->segnr;
 	match->host_busnr = assigned_dev->busnr;
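
The pci_intx_mask_supported() check exists because PCI_COMMAND_INTX_DISABLE is only implemented on PCI 2.3-compliant devices; if the bit does not stick, KVM_DEV_ASSIGN_PCI_2_3 is silently dropped and the code falls back to disable_irq()-based masking. Roughly how such a probe can work — a sketch, not the drivers/pci code, which also serializes against concurrent config-space access:

	/* Sketch: toggle the disable bit and see whether it latches. */
	static bool intx_mask_supported_sketch(struct pci_dev *pdev)
	{
		u16 orig, toggled;
		bool supported;

		pci_read_config_word(pdev, PCI_COMMAND, &orig);
		pci_write_config_word(pdev, PCI_COMMAND,
				      orig ^ PCI_COMMAND_INTX_DISABLE);
		pci_read_config_word(pdev, PCI_COMMAND, &toggled);

		supported = (orig ^ toggled) & PCI_COMMAND_INTX_DISABLE;

		pci_write_config_word(pdev, PCI_COMMAND, orig);	/* restore */
		return supported;
	}
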
@@ -614,6 +707,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	match->flags = assigned_dev->flags;
 	match->dev = dev;
 	spin_lock_init(&match->intx_lock);
+	mutex_init(&match->intx_mask_lock);
 	match->irq_source_id = -1;
 	match->kvm = kvm;
 	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
@@ -759,6 +853,55 @@ msix_entry_out:
 }
 #endif
 
+static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
+					 struct kvm_assigned_pci_dev *assigned_dev)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *match;
+
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_dev->assigned_dev_id);
+	if (!match) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	mutex_lock(&match->intx_mask_lock);
+
+	match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
+	match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;
+
+	if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
+		if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
+			kvm_set_irq(match->kvm, match->irq_source_id,
+				    match->guest_irq, 0);
+			/*
+			 * Masking at hardware-level is performed on demand,
+			 * i.e. when an IRQ actually arrives at the host.
+			 */
+		} else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
+			/*
+			 * Unmask the IRQ line if required. Unmasking at
+			 * device level will be performed by user space.
+			 */
+			spin_lock_irq(&match->intx_lock);
+			if (match->host_irq_disabled) {
+				enable_irq(match->host_irq);
+				match->host_irq_disabled = false;
+			}
+			spin_unlock_irq(&match->intx_lock);
+		}
+	}
+
+	mutex_unlock(&match->intx_mask_lock);
+
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
 long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 				  unsigned long arg)
 {
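
From userspace, the new ioctl takes the same kvm_assigned_pci_dev structure as device assignment, with KVM_DEV_ASSIGN_MASK_INTX in flags selecting the masked or unmasked state. A hedged usage sketch, assuming the uapi definitions added by this series, an open VM fd, and an already-assigned device id, with error handling elided:

	#include <stdbool.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int kvm_set_intx_masked(int vm_fd, __u32 assigned_dev_id,
				       bool masked)
	{
		struct kvm_assigned_pci_dev pci_dev = {
			.assigned_dev_id = assigned_dev_id,
			.flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
		};

		/* the kernel-side handler is kvm_vm_ioctl_set_pci_irq_mask() */
		return ioctl(vm_fd, KVM_ASSIGN_SET_INTX_MASK, &pci_dev);
	}
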
@@ -866,6 +1009,15 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 		break;
 	}
 #endif
+	case KVM_ASSIGN_SET_INTX_MASK: {
+		struct kvm_assigned_pci_dev assigned_dev;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+			goto out;
+		r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
+		break;
+	}
 	default:
 		r = -ENOTTY;
 		break;
@@ -873,4 +1025,3 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 out:
 	return r;
 }
-