Diffstat (limited to 'virt/kvm/assigned-dev.c')
-rw-r--r--    virt/kvm/assigned-dev.c    213
1 file changed, 181 insertions(+), 32 deletions(-)
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 758e3b36d4cf..01f572c10c71 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -49,31 +49,73 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                         index = i;
                         break;
                 }
-        if (index < 0) {
+        if (index < 0)
                 printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
-                return 0;
-        }
 
         return index;
 }
 
-static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
+static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
 {
         struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+        int ret;
+
+        spin_lock(&assigned_dev->intx_lock);
+        if (pci_check_and_mask_intx(assigned_dev->dev)) {
+                assigned_dev->host_irq_disabled = true;
+                ret = IRQ_WAKE_THREAD;
+        } else
+                ret = IRQ_NONE;
+        spin_unlock(&assigned_dev->intx_lock);
+
+        return ret;
+}
 
-        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
-                spin_lock(&assigned_dev->intx_lock);
+static void
+kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
+                                 int vector)
+{
+        if (unlikely(assigned_dev->irq_requested_type &
+                     KVM_DEV_IRQ_GUEST_INTX)) {
+                spin_lock(&assigned_dev->intx_mask_lock);
+                if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
+                        kvm_set_irq(assigned_dev->kvm,
+                                    assigned_dev->irq_source_id, vector, 1);
+                spin_unlock(&assigned_dev->intx_mask_lock);
+        } else
+                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+                            vector, 1);
+}
+
+static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
+{
+        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+
+        if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
+                spin_lock_irq(&assigned_dev->intx_lock);
                 disable_irq_nosync(irq);
                 assigned_dev->host_irq_disabled = true;
-                spin_unlock(&assigned_dev->intx_lock);
+                spin_unlock_irq(&assigned_dev->intx_lock);
         }
 
-        kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-                    assigned_dev->guest_irq, 1);
+        kvm_assigned_dev_raise_guest_irq(assigned_dev,
+                                         assigned_dev->guest_irq);
 
         return IRQ_HANDLED;
 }
 
+#ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
+{
+        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+
+        kvm_assigned_dev_raise_guest_irq(assigned_dev,
+                                         assigned_dev->guest_irq);
+
+        return IRQ_HANDLED;
+}
+#endif
+
 #ifdef __KVM_HAVE_MSIX
 static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
 {
@@ -83,8 +125,7 @@ static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
 
         if (index >= 0) {
                 vector = assigned_dev->guest_msix_entries[index].vector;
-                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-                            vector, 1);
+                kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
         }
 
         return IRQ_HANDLED;
@@ -100,15 +141,31 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 
         kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
 
-        /* The guest irq may be shared so this ack may be
-         * from another device.
-         */
-        spin_lock(&dev->intx_lock);
-        if (dev->host_irq_disabled) {
-                enable_irq(dev->host_irq);
-                dev->host_irq_disabled = false;
+        spin_lock(&dev->intx_mask_lock);
+
+        if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
+                bool reassert = false;
+
+                spin_lock_irq(&dev->intx_lock);
+                /*
+                 * The guest IRQ may be shared so this ack can come from an
+                 * IRQ for another guest device.
+                 */
+                if (dev->host_irq_disabled) {
+                        if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
+                                enable_irq(dev->host_irq);
+                        else if (!pci_check_and_unmask_intx(dev->dev))
+                                reassert = true;
+                        dev->host_irq_disabled = reassert;
+                }
+                spin_unlock_irq(&dev->intx_lock);
+
+                if (reassert)
+                        kvm_set_irq(dev->kvm, dev->irq_source_id,
+                                    dev->guest_irq, 1);
         }
-        spin_unlock(&dev->intx_lock);
+
+        spin_unlock(&dev->intx_mask_lock);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
@@ -156,7 +213,15 @@ static void deassign_host_irq(struct kvm *kvm,
                 pci_disable_msix(assigned_dev->dev);
         } else {
                 /* Deal with MSI and INTx */
-                disable_irq(assigned_dev->host_irq);
+                if ((assigned_dev->irq_requested_type &
+                     KVM_DEV_IRQ_HOST_INTX) &&
+                    (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
+                        spin_lock_irq(&assigned_dev->intx_lock);
+                        pci_intx(assigned_dev->dev, false);
+                        spin_unlock_irq(&assigned_dev->intx_lock);
+                        synchronize_irq(assigned_dev->host_irq);
+                } else
+                        disable_irq(assigned_dev->host_irq);
 
                 free_irq(assigned_dev->host_irq, assigned_dev);
 
@@ -237,15 +302,34 @@ void kvm_free_all_assigned_devices(struct kvm *kvm)
 static int assigned_device_enable_host_intx(struct kvm *kvm,
                                             struct kvm_assigned_dev_kernel *dev)
 {
+        irq_handler_t irq_handler;
+        unsigned long flags;
+
         dev->host_irq = dev->dev->irq;
-        /* Even though this is PCI, we don't want to use shared
-         * interrupts. Sharing host devices with guest-assigned devices
-         * on the same interrupt line is not a happy situation: there
-         * are going to be long delays in accepting, acking, etc.
+
+        /*
+         * We can only share the IRQ line with other host devices if we are
+         * able to disable the IRQ source at device-level - independently of
+         * the guest driver. Otherwise host devices may suffer from unbounded
+         * IRQ latencies when the guest keeps the line asserted.
          */
-        if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-                                 IRQF_ONESHOT, dev->irq_name, dev))
+        if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
+                irq_handler = kvm_assigned_dev_intx;
+                flags = IRQF_SHARED;
+        } else {
+                irq_handler = NULL;
+                flags = IRQF_ONESHOT;
+        }
+        if (request_threaded_irq(dev->host_irq, irq_handler,
+                                 kvm_assigned_dev_thread_intx, flags,
+                                 dev->irq_name, dev))
                 return -EIO;
+
+        if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
+                spin_lock_irq(&dev->intx_lock);
+                pci_intx(dev->dev, true);
+                spin_unlock_irq(&dev->intx_lock);
+        }
         return 0;
 }
 
@@ -262,8 +346,9 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
         }
 
         dev->host_irq = dev->dev->irq;
-        if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
-                                 0, dev->irq_name, dev)) {
+        if (request_threaded_irq(dev->host_irq, NULL,
+                                 kvm_assigned_dev_thread_msi, 0,
+                                 dev->irq_name, dev)) {
                 pci_disable_msi(dev->dev);
                 return -EIO;
         }
@@ -321,7 +406,6 @@ static int assigned_device_enable_guest_msi(struct kvm *kvm,
 {
         dev->guest_irq = irq->guest_irq;
         dev->ack_notifier.gsi = -1;
-        dev->host_irq_disabled = false;
         return 0;
 }
 #endif
@@ -333,7 +417,6 @@ static int assigned_device_enable_guest_msix(struct kvm *kvm,
 {
         dev->guest_irq = irq->guest_irq;
         dev->ack_notifier.gsi = -1;
-        dev->host_irq_disabled = false;
         return 0;
 }
 #endif
@@ -367,6 +450,7 @@ static int assign_host_irq(struct kvm *kvm,
         default:
                 r = -EINVAL;
         }
+        dev->host_irq_disabled = false;
 
         if (!r)
                 dev->irq_requested_type |= host_irq_type;
@@ -468,6 +552,7 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
 {
         int r = -ENODEV;
         struct kvm_assigned_dev_kernel *match;
+        unsigned long irq_type;
 
         mutex_lock(&kvm->lock);
 
@@ -476,7 +561,9 @@
         if (!match)
                 goto out;
 
-        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
+        irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
+                                          KVM_DEV_IRQ_GUEST_MASK);
+        r = kvm_deassign_irq(kvm, match, irq_type);
 out:
         mutex_unlock(&kvm->lock);
         return r;
@@ -609,6 +696,10 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
         if (!match->pci_saved_state)
                 printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
                        __func__, dev_name(&dev->dev));
+
+        if (!pci_intx_mask_supported(dev))
+                assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;
+
         match->assigned_dev_id = assigned_dev->assigned_dev_id;
         match->host_segnr = assigned_dev->segnr;
         match->host_busnr = assigned_dev->busnr;
@@ -616,6 +707,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
         match->flags = assigned_dev->flags;
         match->dev = dev;
         spin_lock_init(&match->intx_lock);
+        spin_lock_init(&match->intx_mask_lock);
         match->irq_source_id = -1;
         match->kvm = kvm;
         match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
@@ -761,6 +853,55 @@ msix_entry_out:
 }
 #endif
 
+static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
+                                struct kvm_assigned_pci_dev *assigned_dev)
+{
+        int r = 0;
+        struct kvm_assigned_dev_kernel *match;
+
+        mutex_lock(&kvm->lock);
+
+        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+                                      assigned_dev->assigned_dev_id);
+        if (!match) {
+                r = -ENODEV;
+                goto out;
+        }
+
+        spin_lock(&match->intx_mask_lock);
+
+        match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
+        match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;
+
+        if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
+                if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
+                        kvm_set_irq(match->kvm, match->irq_source_id,
+                                    match->guest_irq, 0);
+                        /*
+                         * Masking at hardware-level is performed on demand,
+                         * i.e. when an IRQ actually arrives at the host.
+                         */
+                } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
+                        /*
+                         * Unmask the IRQ line if required. Unmasking at
+                         * device level will be performed by user space.
+                         */
+                        spin_lock_irq(&match->intx_lock);
+                        if (match->host_irq_disabled) {
+                                enable_irq(match->host_irq);
+                                match->host_irq_disabled = false;
+                        }
+                        spin_unlock_irq(&match->intx_lock);
+                }
+        }
+
+        spin_unlock(&match->intx_mask_lock);
+
+out:
+        mutex_unlock(&kvm->lock);
+        return r;
+}
+
 long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                   unsigned long arg)
 {
@@ -868,6 +1009,15 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                 break;
         }
 #endif
+        case KVM_ASSIGN_SET_INTX_MASK: {
+                struct kvm_assigned_pci_dev assigned_dev;
+
+                r = -EFAULT;
+                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+                        goto out;
+                r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
+                break;
+        }
         default:
                 r = -ENOTTY;
                 break;
@@ -875,4 +1025,3 @@ long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
 out:
         return r;
 }
-
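
Note on the handler split above: when KVM_DEV_ASSIGN_PCI_2_3 is set, request_threaded_irq() gets a real hard handler (kvm_assigned_dev_intx) that silences the device through the PCI 2.3 Interrupt Disable bit and returns IRQ_WAKE_THREAD, which is what makes IRQF_SHARED safe; without the flag the hard handler stays NULL and IRQF_ONESHOT keeps the line masked until the thread finishes. A minimal sketch of the same pattern outside of KVM follows; my_dev and every name in it are hypothetical, not part of this patch.

/*
 * Sketch only: hard/threaded IRQ split for a shared PCI 2.3 INTx line.
 * "my_dev" and all names below are illustrative, not from this patch.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

struct my_dev {
        struct pci_dev *pdev;
        spinlock_t lock;
        bool line_masked;
};

/* Hard handler: IRQ context; only check the line and mask the device. */
static irqreturn_t my_dev_hard_handler(int irq, void *dev_id)
{
        struct my_dev *d = dev_id;
        irqreturn_t ret = IRQ_NONE;

        spin_lock(&d->lock);
        /* True only if this device is asserting the (shared) line. */
        if (pci_check_and_mask_intx(d->pdev)) {
                d->line_masked = true;
                ret = IRQ_WAKE_THREAD;  /* defer the real work */
        }
        spin_unlock(&d->lock);
        return ret;
}

/* Threaded handler: process context; do the work, then unmask. */
static irqreturn_t my_dev_thread_handler(int irq, void *dev_id)
{
        struct my_dev *d = dev_id;

        /* ... handle the event here ... */

        spin_lock_irq(&d->lock);
        /* Unmasks and returns true unless an interrupt is still pending. */
        if (d->line_masked && pci_check_and_unmask_intx(d->pdev))
                d->line_masked = false;
        spin_unlock_irq(&d->lock);
        return IRQ_HANDLED;
}

static int my_dev_request_irq(struct my_dev *d)
{
        /*
         * IRQF_SHARED is only safe because the hard handler can silence
         * the device at PCI level; without that ability, a NULL hard
         * handler plus IRQF_ONESHOT is the fallback, as in the patch.
         */
        return request_threaded_irq(d->pdev->irq, my_dev_hard_handler,
                                    my_dev_thread_handler, IRQF_SHARED,
                                    "my_dev", d);
}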
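The new KVM_ASSIGN_SET_INTX_MASK ioctl (handled by kvm_vm_ioctl_set_pci_irq_mask() above) is driven from userspace, typically when the guest toggles the Interrupt Disable bit in the assigned device's PCI command register. A hedged userspace sketch, assuming kernel headers from this series and a device already registered via KVM_ASSIGN_PCI_DEVICE; vm_fd and dev_id are placeholders supplied by the caller:

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch: mask/unmask the assigned device's INTx from the guest's view. */
static int set_intx_mask(int vm_fd, __u32 dev_id, int masked)
{
        struct kvm_assigned_pci_dev dev;

        memset(&dev, 0, sizeof(dev));
        dev.assigned_dev_id = dev_id;   /* id used at assignment time */
        if (masked)
                dev.flags |= KVM_DEV_ASSIGN_MASK_INTX;

        if (ioctl(vm_fd, KVM_ASSIGN_SET_INTX_MASK, &dev) < 0) {
                perror("KVM_ASSIGN_SET_INTX_MASK");
                return -1;
        }
        return 0;
}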