author     Paolo Bonzini <pbonzini@redhat.com>    2014-03-18 07:00:14 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-03-21 05:20:16 -0400
commit     44847dea79751e95665a439f8c63a65e51da8e1f
tree       1329f713872d1034bfb2c1b80d483ffa0e72777c
parent     0bc830b05c667218d703f2026ec866c49df974fc
KVM: ioapic: extract body of kvm_ioapic_set_irq
We will reuse it to process a nonzero IRR that is passed to KVM_SET_IRQCHIP.
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 virt/kvm/ioapic.c | 74
 1 file changed, 50 insertions(+), 24 deletions(-)
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 25e16a6898ed..270f7fe73f39 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -163,6 +163,55 @@ static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
 	return false;
 }
 
+static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
+		int irq_level, bool line_status)
+{
+	union kvm_ioapic_redirect_entry entry;
+	u32 mask = 1 << irq;
+	u32 old_irr;
+	int edge, ret;
+
+	entry = ioapic->redirtbl[irq];
+	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
+	if (!irq_level) {
+		ioapic->irr &= ~mask;
+		ret = 1;
+		goto out;
+	}
+
+	/*
+	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
+	 * this only happens if a previous edge has not been delivered due
+	 * do masking.  For level interrupts, the remote_irr field tells
+	 * us if the interrupt is waiting for an EOI.
+	 *
+	 * RTC is special: it is edge-triggered, but userspace likes to know
+	 * if it has been already ack-ed via EOI because coalesced RTC
+	 * interrupts lead to time drift in Windows guests.  So we track
+	 * EOI manually for the RTC interrupt.
+	 */
+	if (irq == RTC_GSI && line_status &&
+	    rtc_irq_check_coalesced(ioapic)) {
+		ret = 0;
+		goto out;
+	}
+
+	old_irr = ioapic->irr;
+	ioapic->irr |= mask;
+	if ((edge && old_irr == ioapic->irr) ||
+	    (!edge && entry.fields.remote_irr)) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = ioapic_service(ioapic, irq, line_status);
+
+out:
+	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
+	return ret;
+}
+
 static void update_handled_vectors(struct kvm_ioapic *ioapic)
 {
 	DECLARE_BITMAP(handled_vectors, 256);
@@ -308,38 +357,15 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		       int level, bool line_status)
 {
-	u32 old_irr;
-	u32 mask = 1 << irq;
-	union kvm_ioapic_redirect_entry entry;
 	int ret, irq_level;
 
 	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
 
 	spin_lock(&ioapic->lock);
-	old_irr = ioapic->irr;
 	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
 					 irq_source_id, level);
-	entry = ioapic->redirtbl[irq];
-	if (!irq_level) {
-		ioapic->irr &= ~mask;
-		ret = 1;
-	} else {
-		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
 
-		if (irq == RTC_GSI && line_status &&
-		    rtc_irq_check_coalesced(ioapic)) {
-			ret = 0; /* coalesced */
-			goto out;
-		}
-		ioapic->irr |= mask;
-		if ((edge && old_irr != ioapic->irr) ||
-		    (!edge && !entry.fields.remote_irr))
-			ret = ioapic_service(ioapic, irq, line_status);
-		else
-			ret = 0; /* report coalesced interrupt */
-	}
-out:
-	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	spin_unlock(&ioapic->lock);
 
 	return ret;
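
The commit message says the extracted helper will be reused to process a nonzero IRR passed in through KVM_SET_IRQCHIP. As a rough, hypothetical sketch of what such a reuse could look like (it is not part of this patch, and the helper name kvm_ioapic_inject_pending is made up), a restore path could walk the saved IRR and feed each pending pin through ioapic_set_irq(), so the same edge/level and RTC coalescing rules apply to reinjected interrupts:

	/*
	 * Hypothetical sketch, not in this patch: replay a nonzero IRR
	 * restored via KVM_SET_IRQCHIP by pushing each pending pin through
	 * ioapic_set_irq(). Assumes the caller holds ioapic->lock, as
	 * kvm_ioapic_set_irq() does around its call to the helper.
	 */
	static void kvm_ioapic_inject_pending(struct kvm_ioapic *ioapic,
					      unsigned long irr)
	{
		u32 idx;

		for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
			ioapic_set_irq(ioapic, idx, 1, true);
	}

Everything here beyond the call into ioapic_set_irq() is an assumption; the actual reuse is left to a later commit.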
