author     Yang Zhang <yang.z.zhang@Intel.com>    2013-04-11 07:21:40 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>  2013-04-15 22:20:34 -0400
commit     aa2fbe6d44892070d78995f0df875ce930904e29 (patch)
tree       bc2350a65c4e656df5597ebe4111bcfd98116bfe /virt
parent     f3bff6318fa0f54956b02ed451d9b120441006ea (diff)
KVM: Let ioapic know the irq line status
Userspace may deliver an RTC interrupt without querying the status, so we want to track RTC EOI for this case.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
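The mechanical change below threads a new bool line_status argument from kvm_vm_ioctl_irq_line() through kvm_set_irq(), kvm_set_msi(), kvm_ioapic_set_irq() and ioapic_service(), so that the ioapic can later track RTC EOI. The following is a minimal standalone sketch of the calling convention only; model_set_irq() and the numeric ioctl values are hypothetical stand-ins, and just the shape of the flag propagation follows the hunks below:

/*
 * Standalone model (not kernel code): only the KVM_IRQ_LINE_STATUS ioctl
 * path reports the real line status; the in-kernel callers touched by this
 * patch (irqfd, MSI, assigned devices) simply pass false.
 */
#include <stdbool.h>
#include <stdio.h>

#define KVM_IRQ_LINE		0	/* placeholder values, not the real ioctl numbers */
#define KVM_IRQ_LINE_STATUS	1

/* models kvm_set_irq(kvm, irq_source_id, irq, level, line_status) */
static int model_set_irq(unsigned int irq, int level, bool line_status)
{
	printf("irq %u level %d line_status %s\n",
	       irq, level, line_status ? "true" : "false");
	return 1;	/* pretend the interrupt was delivered to one CPU */
}

int main(void)
{
	int ioctl_nr = KVM_IRQ_LINE_STATUS;

	/* ioctl path: line_status is true only for KVM_IRQ_LINE_STATUS */
	model_set_irq(8, 1, ioctl_nr == KVM_IRQ_LINE_STATUS);

	/* the in-kernel callers changed by this patch just pass false */
	model_set_irq(8, 0, false);

	return 0;
}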
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/assigned-dev.c  13
-rw-r--r--  virt/kvm/eventfd.c       15
-rw-r--r--  virt/kvm/ioapic.c        18
-rw-r--r--  virt/kvm/ioapic.h         2
-rw-r--r--  virt/kvm/irq_comm.c      19
-rw-r--r--  virt/kvm/kvm_main.c       3
6 files changed, 41 insertions, 29 deletions
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3642239252b0..f4c7f591b5d8 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -80,11 +80,12 @@ kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
 		spin_lock(&assigned_dev->intx_mask_lock);
 		if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
 			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id, vector, 1);
+				    assigned_dev->irq_source_id, vector, 1,
+				    false);
 		spin_unlock(&assigned_dev->intx_mask_lock);
 	} else
 		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-			    vector, 1);
+			    vector, 1, false);
 }
 
 static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
@@ -165,7 +166,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 		container_of(kian, struct kvm_assigned_dev_kernel,
 			     ack_notifier);
 
-	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
+	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
 
 	spin_lock(&dev->intx_mask_lock);
 
@@ -188,7 +189,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 
 		if (reassert)
 			kvm_set_irq(dev->kvm, dev->irq_source_id,
-				    dev->guest_irq, 1);
+				    dev->guest_irq, 1, false);
 	}
 
 	spin_unlock(&dev->intx_mask_lock);
@@ -202,7 +203,7 @@ static void deassign_guest_irq(struct kvm *kvm,
 					&assigned_dev->ack_notifier);
 
 	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
-		    assigned_dev->guest_irq, 0);
+		    assigned_dev->guest_irq, 0, false);
 
 	if (assigned_dev->irq_source_id != -1)
 		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
@@ -901,7 +902,7 @@ static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
 	if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
 		if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
 			kvm_set_irq(match->kvm, match->irq_source_id,
-				    match->guest_irq, 0);
+				    match->guest_irq, 0, false);
 			/*
 			 * Masking at hardware-level is performed on demand,
 			 * i.e. when an IRQ actually arrives at the host.
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 48790989f8d2..c5d43ffbf1f3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -100,11 +100,13 @@ irqfd_inject(struct work_struct *work)
 	struct kvm *kvm = irqfd->kvm;
 
 	if (!irqfd->resampler) {
-		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
-		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
+		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
+			    false);
+		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
+			    false);
 	} else
 		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-			    irqfd->gsi, 1);
+			    irqfd->gsi, 1, false);
 }
 
 /*
@@ -121,7 +123,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 	resampler = container_of(kian, struct _irqfd_resampler, notifier);
 
 	kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-		    resampler->notifier.gsi, 0);
+		    resampler->notifier.gsi, 0, false);
 
 	rcu_read_lock();
 
@@ -146,7 +148,7 @@ irqfd_resampler_shutdown(struct _irqfd *irqfd)
 		list_del(&resampler->link);
 		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
 		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
-			    resampler->notifier.gsi, 0);
+			    resampler->notifier.gsi, 0, false);
 		kfree(resampler);
 	}
 
@@ -225,7 +227,8 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 		irq = rcu_dereference(irqfd->irq_entry);
 		/* An event has been signaled, inject an interrupt */
 		if (irq)
-			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+				    false);
 		else
 			schedule_work(&irqfd->inject);
 		rcu_read_unlock();
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 76528fff252d..a49fcd55b378 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -50,7 +50,8 @@
 #else
 #define ioapic_debug(fmt, arg...)
 #endif
-static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);
+static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq,
+		bool line_status);
 
 static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 					  unsigned long addr,
@@ -146,7 +147,8 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 		__rtc_irq_eoi_tracking_restore_one(vcpu);
 }
 
-static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
+static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
+		bool line_status)
 {
 	union kvm_ioapic_redirect_entry *pent;
 	int injected = -1;
@@ -154,7 +156,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 	pent = &ioapic->redirtbl[idx];
 
 	if (!pent->fields.mask) {
-		injected = ioapic_deliver(ioapic, idx);
+		injected = ioapic_deliver(ioapic, idx, line_status);
 		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
 			pent->fields.remote_irr = 1;
 	}
@@ -248,13 +250,13 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
 		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 		    && ioapic->irr & (1 << index))
-			ioapic_service(ioapic, index);
+			ioapic_service(ioapic, index, false);
 		kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
 		break;
 	}
 }
 
-static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 {
 	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
 	struct kvm_lapic_irq irqe;
@@ -277,7 +279,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-		       int level)
+		       int level, bool line_status)
 {
 	u32 old_irr;
 	u32 mask = 1 << irq;
@@ -300,7 +302,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		ioapic->irr |= mask;
 		if ((edge && old_irr != ioapic->irr) ||
 		    (!edge && !entry.fields.remote_irr))
-			ret = ioapic_service(ioapic, irq);
+			ret = ioapic_service(ioapic, irq, line_status);
 		else
 			ret = 0; /* report coalesced interrupt */
 	}
@@ -349,7 +351,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
 		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
-			ioapic_service(ioapic, i);
+			ioapic_service(ioapic, i, false);
 	}
 }
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 313fc4ea61d9..554157bbb586 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -89,7 +89,7 @@ bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
 int kvm_ioapic_init(struct kvm *kvm);
 void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-		       int level);
+		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 2f07d2e59a2d..8efb580edfef 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -35,7 +35,8 @@
 #include "ioapic.h"
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
-			   struct kvm *kvm, int irq_source_id, int level)
+			   struct kvm *kvm, int irq_source_id, int level,
+			   bool line_status)
 {
 #ifdef CONFIG_X86
 	struct kvm_pic *pic = pic_irqchip(kvm);
@@ -46,10 +47,12 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
-			      struct kvm *kvm, int irq_source_id, int level)
+			      struct kvm *kvm, int irq_source_id, int level,
+			      bool line_status)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
+	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
+				line_status);
 }
 
 inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
@@ -121,7 +124,7 @@ static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
 }
 
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		struct kvm *kvm, int irq_source_id, int level)
+		struct kvm *kvm, int irq_source_id, int level, bool line_status)
 {
 	struct kvm_lapic_irq irq;
 
@@ -159,7 +162,7 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 	route.msi.address_hi = msi->address_hi;
 	route.msi.data = msi->data;
 
-	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
 }
 
 /*
@@ -168,7 +171,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
  * = 0   Interrupt was coalesced (previous irq is still pending)
  * > 0   Number of CPUs interrupt was delivered to
  */
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+		bool line_status)
 {
 	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
 	int ret = -1, i = 0;
@@ -189,7 +193,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 
 	while(i--) {
 		int r;
-		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
+		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
+				   line_status);
 		if (r < 0)
 			continue;
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5cc53c907d3b..ac3182eed462 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2258,7 +2258,8 @@ static long kvm_vm_ioctl(struct file *filp,
 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
 			goto out;
 
-		r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
+		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
+					ioctl == KVM_IRQ_LINE_STATUS);
 		if (r)
 			goto out;
 