author	Paolo Bonzini <pbonzini@redhat.com>	2014-11-20 07:45:31 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-11-21 12:02:37 -0500
commit	6ef768fac9dfe3404d3fdc09909ea203a88f2f38 (patch)
tree	6bfd08a6a7527bc94b015fe7c0177af51efa5434 /virt
parent	c32a42721ce67594e4481a961aa149055de9c1d9 (diff)
kvm: x86: move ioapic.c and irq_comm.c back to arch/x86/
ia64 does not need them anymore. Ack notifiers become x86-specific
too.
Suggested-by: Gleb Natapov <gleb@kernel.org>
Reviewed-by: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/eventfd.c	7
-rw-r--r--	virt/kvm/ioapic.c	682
-rw-r--r--	virt/kvm/ioapic.h	103
-rw-r--r--	virt/kvm/irq_comm.c	347
-rw-r--r--	virt/kvm/kvm_main.c	3
5 files changed, 0 insertions, 1142 deletions
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b0fb390943c6..148b2392c762 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,9 +36,6 @@
 #include <linux/seqlock.h>
 #include <trace/events/kvm.h>
 
-#ifdef __KVM_HAVE_IOAPIC
-#include "ioapic.h"
-#endif
 #include "iodev.h"
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
@@ -492,9 +489,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 	mutex_lock(&kvm->irq_lock);
 	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
-#ifdef __KVM_HAVE_IOAPIC
 	kvm_vcpu_request_scan_ioapic(kvm);
-#endif
 }
 
 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -504,9 +499,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
 	synchronize_srcu(&kvm->irq_srcu);
-#ifdef __KVM_HAVE_IOAPIC
 	kvm_vcpu_request_scan_ioapic(kvm);
-#endif
 }
 #endif
 
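[Editor's note] With the __KVM_HAVE_IOAPIC guards gone, common code calls kvm_vcpu_request_scan_ioapic() unconditionally, so a definition must exist on every architecture; the non-x86 no-op in the deleted ioapic.c below is one instance of that pattern. A minimal sketch of the arrangement (the header placement shown here is an assumption for illustration, not something this commit adds):

/* Sketch only: one real definition on x86, a no-op inline elsewhere,
 * so callers need no #ifdef. Header location is assumed. */
struct kvm;

#ifdef CONFIG_X86
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
#endif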
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
deleted file mode 100644
index f0f7ef82b7a6..000000000000
--- a/virt/kvm/ioapic.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * Copyright (C) 2001 MandrakeSoft S.A.
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- *
- * MandrakeSoft S.A.
- * 43, rue d'Aboukir
- * 75002 Paris - France
- * http://www.linux-mandrake.com/
- * http://www.mandrakesoft.com/
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Yunhong Jiang <yunhong.jiang@intel.com>
- * Yaozu (Eddie) Dong <eddie.dong@intel.com>
- * Based on Xen 3.1 code.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/smp.h>
-#include <linux/hrtimer.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/current.h>
-#include <trace/events/kvm.h>
-
-#include "ioapic.h"
-#include "lapic.h"
-#include "irq.h"
-
-#if 0
-#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
-#else
-#define ioapic_debug(fmt, arg...)
-#endif
-static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
-		bool line_status);
-
-static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
-					  unsigned long addr,
-					  unsigned long length)
-{
-	unsigned long result = 0;
-
-	switch (ioapic->ioregsel) {
-	case IOAPIC_REG_VERSION:
-		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
-			  | (IOAPIC_VERSION_ID & 0xff));
-		break;
-
-	case IOAPIC_REG_APIC_ID:
-	case IOAPIC_REG_ARB_ID:
-		result = ((ioapic->id & 0xf) << 24);
-		break;
-
-	default:
-		{
-			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
-			u64 redir_content;
-
-			if (redir_index < IOAPIC_NUM_PINS)
-				redir_content =
-					ioapic->redirtbl[redir_index].bits;
-			else
-				redir_content = ~0ULL;
-
-			result = (ioapic->ioregsel & 0x1) ?
-			    (redir_content >> 32) & 0xffffffff :
-			    redir_content & 0xffffffff;
-			break;
-		}
-	}
-
-	return result;
-}
-
-static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
-{
-	ioapic->rtc_status.pending_eoi = 0;
-	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
-}
-
-static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
-
-static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
-{
-	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
-		kvm_rtc_eoi_tracking_restore_all(ioapic);
-}
-
-static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
-{
-	bool new_val, old_val;
-	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
-	union kvm_ioapic_redirect_entry *e;
-
-	e = &ioapic->redirtbl[RTC_GSI];
-	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
-				e->fields.dest_mode))
-		return;
-
-	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
-	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
-
-	if (new_val == old_val)
-		return;
-
-	if (new_val) {
-		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
-		ioapic->rtc_status.pending_eoi++;
-	} else {
-		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
-		ioapic->rtc_status.pending_eoi--;
-		rtc_status_pending_eoi_check_valid(ioapic);
-	}
-}
-
-void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
-{
-	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
-
-	spin_lock(&ioapic->lock);
-	__rtc_irq_eoi_tracking_restore_one(vcpu);
-	spin_unlock(&ioapic->lock);
-}
-
-static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	if (RTC_GSI >= IOAPIC_NUM_PINS)
-		return;
-
-	rtc_irq_eoi_tracking_reset(ioapic);
-	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
-		__rtc_irq_eoi_tracking_restore_one(vcpu);
-}
-
-static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
-{
-	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
-		--ioapic->rtc_status.pending_eoi;
-		rtc_status_pending_eoi_check_valid(ioapic);
-	}
-}
-
-static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
-{
-	if (ioapic->rtc_status.pending_eoi > 0)
-		return true; /* coalesced */
-
-	return false;
-}
-
-static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
-		int irq_level, bool line_status)
-{
-	union kvm_ioapic_redirect_entry entry;
-	u32 mask = 1 << irq;
-	u32 old_irr;
-	int edge, ret;
-
-	entry = ioapic->redirtbl[irq];
-	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
-
-	if (!irq_level) {
-		ioapic->irr &= ~mask;
-		ret = 1;
-		goto out;
-	}
-
-	/*
-	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
-	 * this only happens if a previous edge has not been delivered due
-	 * to masking.  For level interrupts, the remote_irr field tells
-	 * us if the interrupt is waiting for an EOI.
-	 *
-	 * RTC is special: it is edge-triggered, but userspace likes to know
-	 * if it has been already ack-ed via EOI because coalesced RTC
-	 * interrupts lead to time drift in Windows guests.  So we track
-	 * EOI manually for the RTC interrupt.
-	 */
-	if (irq == RTC_GSI && line_status &&
-			rtc_irq_check_coalesced(ioapic)) {
-		ret = 0;
-		goto out;
-	}
-
-	old_irr = ioapic->irr;
-	ioapic->irr |= mask;
-	if ((edge && old_irr == ioapic->irr) ||
-	    (!edge && entry.fields.remote_irr)) {
-		ret = 0;
-		goto out;
-	}
-
-	ret = ioapic_service(ioapic, irq, line_status);
-
-out:
-	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
-	return ret;
-}
-
-static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
-{
-	u32 idx;
-
-	rtc_irq_eoi_tracking_reset(ioapic);
-	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
-		ioapic_set_irq(ioapic, idx, 1, true);
-
-	kvm_rtc_eoi_tracking_restore_all(ioapic);
-}
-
-
-static void update_handled_vectors(struct kvm_ioapic *ioapic)
-{
-	DECLARE_BITMAP(handled_vectors, 256);
-	int i;
-
-	memset(handled_vectors, 0, sizeof(handled_vectors));
-	for (i = 0; i < IOAPIC_NUM_PINS; ++i)
-		__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
-	memcpy(ioapic->handled_vectors, handled_vectors,
-	       sizeof(handled_vectors));
-	smp_wmb();
-}
-
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
-			u32 *tmr)
-{
-	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
-	union kvm_ioapic_redirect_entry *e;
-	int index;
-
-	spin_lock(&ioapic->lock);
-	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
-		e = &ioapic->redirtbl[index];
-		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
-		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
-		    index == RTC_GSI) {
-			if (kvm_apic_match_dest(vcpu, NULL, 0,
-				e->fields.dest_id, e->fields.dest_mode)) {
-				__set_bit(e->fields.vector,
-					(unsigned long *)eoi_exit_bitmap);
-				if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
-					__set_bit(e->fields.vector,
-						(unsigned long *)tmr);
-			}
-		}
-	}
-	spin_unlock(&ioapic->lock);
-}
-
-#ifdef CONFIG_X86
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
-{
-	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-
-	if (!ioapic)
-		return;
-	kvm_make_scan_ioapic_request(kvm);
-}
-#else
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
-{
-	return;
-}
-#endif
-
-static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
-{
-	unsigned index;
-	bool mask_before, mask_after;
-	union kvm_ioapic_redirect_entry *e;
-
-	switch (ioapic->ioregsel) {
-	case IOAPIC_REG_VERSION:
-		/* Writes are ignored. */
-		break;
-
-	case IOAPIC_REG_APIC_ID:
-		ioapic->id = (val >> 24) & 0xf;
-		break;
-
-	case IOAPIC_REG_ARB_ID:
-		break;
-
-	default:
-		index = (ioapic->ioregsel - 0x10) >> 1;
-
-		ioapic_debug("change redir index %x val %x\n", index, val);
-		if (index >= IOAPIC_NUM_PINS)
-			return;
-		e = &ioapic->redirtbl[index];
-		mask_before = e->fields.mask;
-		if (ioapic->ioregsel & 1) {
-			e->bits &= 0xffffffff;
-			e->bits |= (u64) val << 32;
-		} else {
-			e->bits &= ~0xffffffffULL;
-			e->bits |= (u32) val;
-			e->fields.remote_irr = 0;
-		}
-		update_handled_vectors(ioapic);
-		mask_after = e->fields.mask;
-		if (mask_before != mask_after)
-			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
-		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
-		    && ioapic->irr & (1 << index))
-			ioapic_service(ioapic, index, false);
-		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
-		break;
-	}
-}
-
-static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
-{
-	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
-	struct kvm_lapic_irq irqe;
-	int ret;
-
-	if (entry->fields.mask)
-		return -1;
-
-	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
-		     "vector=%x trig_mode=%x\n",
-		     entry->fields.dest_id, entry->fields.dest_mode,
-		     entry->fields.delivery_mode, entry->fields.vector,
-		     entry->fields.trig_mode);
-
-	irqe.dest_id = entry->fields.dest_id;
-	irqe.vector = entry->fields.vector;
-	irqe.dest_mode = entry->fields.dest_mode;
-	irqe.trig_mode = entry->fields.trig_mode;
-	irqe.delivery_mode = entry->fields.delivery_mode << 8;
-	irqe.level = 1;
-	irqe.shorthand = 0;
-
-	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
-		ioapic->irr &= ~(1 << irq);
-
-	if (irq == RTC_GSI && line_status) {
-		/*
-		 * pending_eoi cannot ever become negative (see
-		 * rtc_status_pending_eoi_check_valid) and the caller
-		 * ensures that it is only called if it is >= zero, namely
-		 * if rtc_irq_check_coalesced returns false.
-		 */
-		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
-		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
-				ioapic->rtc_status.dest_map);
-		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
-	} else
-		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
-
-	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
-		entry->fields.remote_irr = 1;
-
-	return ret;
-}
-
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-		       int level, bool line_status)
-{
-	int ret, irq_level;
-
-	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
-
-	spin_lock(&ioapic->lock);
-	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
-					 irq_source_id, level);
-	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
-
-	spin_unlock(&ioapic->lock);
-
-	return ret;
-}
-
-void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
-{
-	int i;
-
-	spin_lock(&ioapic->lock);
-	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
-		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
-	spin_unlock(&ioapic->lock);
-}
-
-static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
-{
-	int i;
-	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
-						 eoi_inject.work);
-	spin_lock(&ioapic->lock);
-	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
-		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
-
-		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
-			continue;
-
-		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
-			ioapic_service(ioapic, i, false);
-	}
-	spin_unlock(&ioapic->lock);
-}
-
-#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
-
-static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
-			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
-{
-	int i;
-
-	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
-		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
-
-		if (ent->fields.vector != vector)
-			continue;
-
-		if (i == RTC_GSI)
-			rtc_irq_eoi(ioapic, vcpu);
-		/*
-		 * We are dropping lock while calling ack notifiers because ack
-		 * notifier callbacks for assigned devices call into IOAPIC
-		 * recursively. Since remote_irr is cleared only after call
-		 * to notifiers if the same vector will be delivered while lock
-		 * is dropped it will be put into irr and will be delivered
-		 * after ack notifier returns.
-		 */
-		spin_unlock(&ioapic->lock);
-		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
-		spin_lock(&ioapic->lock);
-
-		if (trigger_mode != IOAPIC_LEVEL_TRIG)
-			continue;
-
-		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
-		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
-			++ioapic->irq_eoi[i];
-			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
-				/*
-				 * Real hardware does not deliver the interrupt
-				 * immediately during eoi broadcast, and this
-				 * lets a buggy guest make slow progress
-				 * even if it does not correctly handle a
-				 * level-triggered interrupt.  Emulate this
-				 * behavior if we detect an interrupt storm.
-				 */
-				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
-				ioapic->irq_eoi[i] = 0;
-				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
-			} else {
-				ioapic_service(ioapic, i, false);
-			}
-		} else {
-			ioapic->irq_eoi[i] = 0;
-		}
-	}
-}
-
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
-{
-	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	smp_rmb();
-	return test_bit(vector, ioapic->handled_vectors);
-}
-
-void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
-{
-	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
-
-	spin_lock(&ioapic->lock);
-	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
-	spin_unlock(&ioapic->lock);
-}
-
-static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
-{
-	return container_of(dev, struct kvm_ioapic, dev);
-}
-
-static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
-{
-	return ((addr >= ioapic->base_address &&
-		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
-}
-
-static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
-			    void *val)
-{
-	struct kvm_ioapic *ioapic = to_ioapic(this);
-	u32 result;
-	if (!ioapic_in_range(ioapic, addr))
-		return -EOPNOTSUPP;
-
-	ioapic_debug("addr %lx\n", (unsigned long)addr);
-	ASSERT(!(addr & 0xf));	/* check alignment */
-
-	addr &= 0xff;
-	spin_lock(&ioapic->lock);
-	switch (addr) {
-	case IOAPIC_REG_SELECT:
-		result = ioapic->ioregsel;
-		break;
-
-	case IOAPIC_REG_WINDOW:
-		result = ioapic_read_indirect(ioapic, addr, len);
-		break;
-
-	default:
-		result = 0;
-		break;
-	}
-	spin_unlock(&ioapic->lock);
-
-	switch (len) {
-	case 8:
-		*(u64 *) val = result;
-		break;
-	case 1:
-	case 2:
-	case 4:
-		memcpy(val, (char *)&result, len);
-		break;
-	default:
-		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
-	}
-	return 0;
-}
-
-static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
-			     const void *val)
-{
-	struct kvm_ioapic *ioapic = to_ioapic(this);
-	u32 data;
-	if (!ioapic_in_range(ioapic, addr))
-		return -EOPNOTSUPP;
-
-	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
-		     (void*)addr, len, val);
-	ASSERT(!(addr & 0xf));	/* check alignment */
-
-	switch (len) {
-	case 8:
-	case 4:
-		data = *(u32 *) val;
-		break;
-	case 2:
-		data = *(u16 *) val;
-		break;
-	case 1:
-		data = *(u8 *) val;
-		break;
-	default:
-		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
-		return 0;
-	}
-
-	addr &= 0xff;
-	spin_lock(&ioapic->lock);
-	switch (addr) {
-	case IOAPIC_REG_SELECT:
-		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
-		break;
-
-	case IOAPIC_REG_WINDOW:
-		ioapic_write_indirect(ioapic, data);
-		break;
-
-	default:
-		break;
-	}
-	spin_unlock(&ioapic->lock);
-	return 0;
-}
-
-static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
-{
-	int i;
-
-	cancel_delayed_work_sync(&ioapic->eoi_inject);
-	for (i = 0; i < IOAPIC_NUM_PINS; i++)
-		ioapic->redirtbl[i].fields.mask = 1;
-	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
-	ioapic->ioregsel = 0;
-	ioapic->irr = 0;
-	ioapic->id = 0;
-	memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
-	rtc_irq_eoi_tracking_reset(ioapic);
-	update_handled_vectors(ioapic);
-}
-
-static const struct kvm_io_device_ops ioapic_mmio_ops = {
-	.read	= ioapic_mmio_read,
-	.write	= ioapic_mmio_write,
-};
-
-int kvm_ioapic_init(struct kvm *kvm)
-{
-	struct kvm_ioapic *ioapic;
-	int ret;
-
-	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
-	if (!ioapic)
-		return -ENOMEM;
-	spin_lock_init(&ioapic->lock);
-	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
-	kvm->arch.vioapic = ioapic;
-	kvm_ioapic_reset(ioapic);
-	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
-	ioapic->kvm = kvm;
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
-				      IOAPIC_MEM_LENGTH, &ioapic->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret < 0) {
-		kvm->arch.vioapic = NULL;
-		kfree(ioapic);
-	}
-
-	return ret;
-}
-
-void kvm_ioapic_destroy(struct kvm *kvm)
-{
-	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-
-	cancel_delayed_work_sync(&ioapic->eoi_inject);
-	if (ioapic) {
-		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
-		kvm->arch.vioapic = NULL;
-		kfree(ioapic);
-	}
-}
-
-int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
-{
-	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
-	if (!ioapic)
-		return -EINVAL;
-
-	spin_lock(&ioapic->lock);
-	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
-	spin_unlock(&ioapic->lock);
-	return 0;
-}
-
-int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
-{
-	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
-	if (!ioapic)
-		return -EINVAL;
-
-	spin_lock(&ioapic->lock);
-	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
-	ioapic->irr = 0;
-	update_handled_vectors(ioapic);
-	kvm_vcpu_request_scan_ioapic(kvm);
-	kvm_ioapic_inject_all(ioapic, state->irr);
-	spin_unlock(&ioapic->lock);
-	return 0;
-}
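[Editor's note] The deleted ioapic.c emulates the standard IOAPIC indirect access protocol: software writes a register index to IOREGSEL (offset 0x00) and then accesses the data through IOWIN (offset 0x10); each 64-bit redirection entry is addressed as two 32-bit halves at indices 0x10 + 2*pin and 0x10 + 2*pin + 1, exactly as ioapic_read_indirect()/ioapic_write_indirect() above decode. A self-contained toy model of that protocol (plain userspace C, not kernel code; names mirror the diff):

#include <stdint.h>
#include <stdio.h>

#define IOAPIC_REG_SELECT 0x00
#define IOAPIC_REG_WINDOW 0x10
#define IOAPIC_NUM_PINS   24

struct toy_ioapic {
	uint32_t ioregsel;
	uint64_t redirtbl[IOAPIC_NUM_PINS];
};

/* Write the index to the select register, then the data through the
 * window, exactly as a guest would; ioregsel's low bit picks which
 * 32-bit half of the 64-bit redirection entry is accessed. */
static void mmio_write(struct toy_ioapic *io, uint32_t off, uint32_t val)
{
	if (off == IOAPIC_REG_SELECT) {
		io->ioregsel = val & 0xff;
	} else if (off == IOAPIC_REG_WINDOW) {
		uint32_t idx = (io->ioregsel - 0x10) >> 1;

		if (idx >= IOAPIC_NUM_PINS)
			return;
		if (io->ioregsel & 1) {			/* high dword */
			io->redirtbl[idx] &= 0xffffffffULL;
			io->redirtbl[idx] |= (uint64_t)val << 32;
		} else {				/* low dword */
			io->redirtbl[idx] &= ~0xffffffffULL;
			io->redirtbl[idx] |= val;
		}
	}
}

int main(void)
{
	struct toy_ioapic io = {0};

	/* Program pin 8 (the RTC GSI on x86): vector 0x38 in the low
	 * half, destination APIC ID 1 in bits 56-63 via the high half. */
	mmio_write(&io, IOAPIC_REG_SELECT, 0x10 + 2 * 8);
	mmio_write(&io, IOAPIC_REG_WINDOW, 0x38);
	mmio_write(&io, IOAPIC_REG_SELECT, 0x10 + 2 * 8 + 1);
	mmio_write(&io, IOAPIC_REG_WINDOW, 1u << 24);
	printf("pin 8 entry: %#llx\n", (unsigned long long)io.redirtbl[8]);
	return 0;
}

Compiled standalone, this prints "pin 8 entry: 0x100000000000038".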
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
deleted file mode 100644
index dc3baa3a538f..000000000000
--- a/virt/kvm/ioapic.h
+++ /dev/null
@@ -1,103 +0,0 @@
-#ifndef __KVM_IO_APIC_H
-#define __KVM_IO_APIC_H
-
-#include <linux/kvm_host.h>
-
-#include "iodev.h"
-
-struct kvm;
-struct kvm_vcpu;
-
-#define IOAPIC_NUM_PINS  KVM_IOAPIC_NUM_PINS
-#define IOAPIC_VERSION_ID 0x11	/* IOAPIC version */
-#define IOAPIC_EDGE_TRIG  0
-#define IOAPIC_LEVEL_TRIG 1
-
-#define IOAPIC_DEFAULT_BASE_ADDRESS  0xfec00000
-#define IOAPIC_MEM_LENGTH            0x100
-
-/* Direct registers. */
-#define IOAPIC_REG_SELECT  0x00
-#define IOAPIC_REG_WINDOW  0x10
-
-/* Indirect registers. */
-#define IOAPIC_REG_APIC_ID 0x00	/* x86 IOAPIC only */
-#define IOAPIC_REG_VERSION 0x01
-#define IOAPIC_REG_ARB_ID  0x02	/* x86 IOAPIC only */
-
-/*ioapic delivery mode*/
-#define	IOAPIC_FIXED			0x0
-#define	IOAPIC_LOWEST_PRIORITY		0x1
-#define	IOAPIC_PMI			0x2
-#define	IOAPIC_NMI			0x4
-#define	IOAPIC_INIT			0x5
-#define	IOAPIC_EXTINT			0x7
-
-#ifdef CONFIG_X86
-#define RTC_GSI 8
-#else
-#define RTC_GSI -1U
-#endif
-
-struct rtc_status {
-	int pending_eoi;
-	DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
-};
-
-struct kvm_ioapic {
-	u64 base_address;
-	u32 ioregsel;
-	u32 id;
-	u32 irr;
-	u32 pad;
-	union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS];
-	unsigned long irq_states[IOAPIC_NUM_PINS];
-	struct kvm_io_device dev;
-	struct kvm *kvm;
-	void (*ack_notifier)(void *opaque, int irq);
-	spinlock_t lock;
-	DECLARE_BITMAP(handled_vectors, 256);
-	struct rtc_status rtc_status;
-	struct delayed_work eoi_inject;
-	u32 irq_eoi[IOAPIC_NUM_PINS];
-};
-
-#ifdef DEBUG
-#define ASSERT(x)							\
-do {									\
-	if (!(x)) {							\
-		printk(KERN_EMERG "assertion failed %s: %d: %s\n",	\
-		       __FILE__, __LINE__, #x);				\
-		BUG();							\
-	}								\
-} while (0)
-#else
-#define ASSERT(x) do { } while (0)
-#endif
-
-static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
-{
-	return kvm->arch.vioapic;
-}
-
-void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
-		int short_hand, unsigned int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
-			int trigger_mode);
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
-int kvm_ioapic_init(struct kvm *kvm);
-void kvm_ioapic_destroy(struct kvm *kvm);
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
-		       int level, bool line_status);
-void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, unsigned long *dest_map);
-int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
-			u32 *tmr);
-
-#endif
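[Editor's note] The union kvm_ioapic_redirect_entry used throughout redirtbl[] is defined in kvm_host.h and is not part of this diff. For reference, a sketch of the redirection-entry layout its field names imply; the bit positions follow the standard 82093AA IOAPIC datasheet and are an assumption relative to this diff:

#include <stdint.h>

/* Sketch of the standard IOAPIC redirection-entry layout; the real
 * union lives in kvm_host.h. */
union ioapic_redirect_entry {
	uint64_t bits;
	struct {
		uint64_t vector		 : 8;	/* bits 0-7 */
		uint64_t delivery_mode	 : 3;	/* bits 8-10 */
		uint64_t dest_mode	 : 1;	/* bit 11: 0=physical, 1=logical */
		uint64_t delivery_status : 1;	/* bit 12 */
		uint64_t polarity	 : 1;	/* bit 13 */
		uint64_t remote_irr	 : 1;	/* bit 14: set until EOI (level) */
		uint64_t trig_mode	 : 1;	/* bit 15: 0=edge, 1=level */
		uint64_t mask		 : 1;	/* bit 16 */
		uint64_t reserved	 : 39;
		uint64_t dest_id	 : 8;	/* bits 56-63 */
	} fields;
};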
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
deleted file mode 100644
index 1345bde064f5..000000000000
--- a/virt/kvm/irq_comm.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * irq_comm.c: Common API for in kernel interrupt controller
- * Copyright (c) 2007, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- * Authors:
- *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
- *
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <trace/events/kvm.h>
-
-#include <asm/msidef.h>
-
-#include "irq.h"
-
-#include "ioapic.h"
-
-static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
-			   struct kvm *kvm, int irq_source_id, int level,
-			   bool line_status)
-{
-#ifdef CONFIG_X86
-	struct kvm_pic *pic = pic_irqchip(kvm);
-	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
-#else
-	return -1;
-#endif
-}
-
-static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
-			      struct kvm *kvm, int irq_source_id, int level,
-			      bool line_status)
-{
-	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
-				line_status);
-}
-
-static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
-{
-	return irq->delivery_mode == APIC_DM_LOWEST;
-}
-
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
-		struct kvm_lapic_irq *irq, unsigned long *dest_map)
-{
-	int i, r = -1;
-	struct kvm_vcpu *vcpu, *lowest = NULL;
-
-	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
-			kvm_is_dm_lowest_prio(irq)) {
-		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
-		irq->delivery_mode = APIC_DM_FIXED;
-	}
-
-	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
-		return r;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (!kvm_apic_present(vcpu))
-			continue;
-
-		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
-					irq->dest_id, irq->dest_mode))
-			continue;
-
-		if (!kvm_is_dm_lowest_prio(irq)) {
-			if (r < 0)
-				r = 0;
-			r += kvm_apic_set_irq(vcpu, irq, dest_map);
-		} else if (kvm_lapic_enabled(vcpu)) {
-			if (!lowest)
-				lowest = vcpu;
-			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
-				lowest = vcpu;
-		}
-	}
-
-	if (lowest)
-		r = kvm_apic_set_irq(lowest, irq, dest_map);
-
-	return r;
-}
-
-static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
-				   struct kvm_lapic_irq *irq)
-{
-	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
-
-	irq->dest_id = (e->msi.address_lo &
-			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
-	irq->vector = (e->msi.data &
-			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
-	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
-	irq->delivery_mode = e->msi.data & 0x700;
-	irq->level = 1;
-	irq->shorthand = 0;
-	/* TODO Deal with RH bit of MSI message address */
-}
-
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		struct kvm *kvm, int irq_source_id, int level, bool line_status)
-{
-	struct kvm_lapic_irq irq;
-
-	if (!level)
-		return -1;
-
-	kvm_set_msi_irq(e, &irq);
-
-	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
-}
-
-
-static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
-			 struct kvm *kvm)
-{
-	struct kvm_lapic_irq irq;
-	int r;
-
-	kvm_set_msi_irq(e, &irq);
-
-	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
-		return r;
-	else
-		return -EWOULDBLOCK;
-}
-
-/*
- * Deliver an IRQ in an atomic context if we can, or return a failure,
- * user can retry in a process context.
- * Return value:
- *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
- *  Other values - No need to retry.
- */
-int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
-{
-	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
-	struct kvm_kernel_irq_routing_entry *e;
-	int ret = -EINVAL;
-	int idx;
-
-	trace_kvm_set_irq(irq, level, irq_source_id);
-
-	/*
-	 * Injection into either PIC or IOAPIC might need to scan all CPUs,
-	 * which would need to be retried from thread context; when same GSI
-	 * is connected to both PIC and IOAPIC, we'd have to report a
-	 * partial failure here.
-	 * Since there's no easy way to do this, we only support injecting MSI
-	 * which is limited to 1:1 GSI mapping.
-	 */
-	idx = srcu_read_lock(&kvm->irq_srcu);
-	if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
-		e = &entries[0];
-		if (likely(e->type == KVM_IRQ_ROUTING_MSI))
-			ret = kvm_set_msi_inatomic(e, kvm);
-		else
-			ret = -EWOULDBLOCK;
-	}
-	srcu_read_unlock(&kvm->irq_srcu, idx);
-	return ret;
-}
-
-int kvm_request_irq_source_id(struct kvm *kvm)
-{
-	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
-	int irq_source_id;
-
-	mutex_lock(&kvm->irq_lock);
-	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
-
-	if (irq_source_id >= BITS_PER_LONG) {
-		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
-		irq_source_id = -EFAULT;
-		goto unlock;
-	}
-
-	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
-	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
-	set_bit(irq_source_id, bitmap);
-unlock:
-	mutex_unlock(&kvm->irq_lock);
-
-	return irq_source_id;
-}
-
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
-{
-	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
-	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
-
-	mutex_lock(&kvm->irq_lock);
-	if (irq_source_id < 0 ||
-	    irq_source_id >= BITS_PER_LONG) {
-		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
-		goto unlock;
-	}
-	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
-	if (!irqchip_in_kernel(kvm))
-		goto unlock;
-
-	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
-#ifdef CONFIG_X86
-	kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
-#endif
-unlock:
-	mutex_unlock(&kvm->irq_lock);
-}
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
-				    struct kvm_irq_mask_notifier *kimn)
-{
-	mutex_lock(&kvm->irq_lock);
-	kimn->irq = irq;
-	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
-	mutex_unlock(&kvm->irq_lock);
-}
-
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
-				      struct kvm_irq_mask_notifier *kimn)
-{
-	mutex_lock(&kvm->irq_lock);
-	hlist_del_rcu(&kimn->link);
-	mutex_unlock(&kvm->irq_lock);
-	synchronize_srcu(&kvm->irq_srcu);
-}
-
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
-			     bool mask)
-{
-	struct kvm_irq_mask_notifier *kimn;
-	int idx, gsi;
-
-	idx = srcu_read_lock(&kvm->irq_srcu);
-	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
-	if (gsi != -1)
-		hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
-			if (kimn->irq == gsi)
-				kimn->func(kimn, mask);
-	srcu_read_unlock(&kvm->irq_srcu, idx);
-}
-
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
-			  const struct kvm_irq_routing_entry *ue)
-{
-	int r = -EINVAL;
-	int delta;
-	unsigned max_pin;
-
-	switch (ue->type) {
-	case KVM_IRQ_ROUTING_IRQCHIP:
-		delta = 0;
-		switch (ue->u.irqchip.irqchip) {
-		case KVM_IRQCHIP_PIC_MASTER:
-			e->set = kvm_set_pic_irq;
-			max_pin = PIC_NUM_PINS;
-			break;
-		case KVM_IRQCHIP_PIC_SLAVE:
-			e->set = kvm_set_pic_irq;
-			max_pin = PIC_NUM_PINS;
-			delta = 8;
-			break;
-		case KVM_IRQCHIP_IOAPIC:
-			max_pin = KVM_IOAPIC_NUM_PINS;
-			e->set = kvm_set_ioapic_irq;
-			break;
-		default:
-			goto out;
-		}
-		e->irqchip.irqchip = ue->u.irqchip.irqchip;
-		e->irqchip.pin = ue->u.irqchip.pin + delta;
-		if (e->irqchip.pin >= max_pin)
-			goto out;
-		break;
-	case KVM_IRQ_ROUTING_MSI:
-		e->set = kvm_set_msi;
-		e->msi.address_lo = ue->u.msi.address_lo;
-		e->msi.address_hi = ue->u.msi.address_hi;
-		e->msi.data = ue->u.msi.data;
-		break;
-	default:
-		goto out;
-	}
-
-	r = 0;
-out:
-	return r;
-}
-
-#define IOAPIC_ROUTING_ENTRY(irq) \
-	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
-	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
-#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
-
-#ifdef CONFIG_X86
-#  define PIC_ROUTING_ENTRY(irq) \
-	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
-	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
-#  define ROUTING_ENTRY2(irq) \
-	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
-#else
-#  define ROUTING_ENTRY2(irq) \
-	IOAPIC_ROUTING_ENTRY(irq)
-#endif
-
-static const struct kvm_irq_routing_entry default_routing[] = {
-	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
-	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
-	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
-	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
-	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
-	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
-	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
-	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
-	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
-	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
-	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
-	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
-};
-
-int kvm_setup_default_irq_routing(struct kvm *kvm)
-{
-	return kvm_set_irq_routing(kvm, default_routing,
-				   ARRAY_SIZE(default_routing), 0);
-}
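[Editor's note] The default_routing table above maps GSIs 0-15 to both the IOAPIC and the PIC and GSIs 16-23 to the IOAPIC only. Written out by hand, ROUTING_ENTRY2(0) expands roughly to the sketch below; the SELECT_PIC() behavior (master PIC for GSI 0-7, slave for 8-15) is an assumption here, since that macro lives in irq.h outside this diff:

#include <linux/kvm.h>	/* struct kvm_irq_routing_entry, KVM_IRQCHIP_* */

/* Hand expansion of ROUTING_ENTRY2(0): the same GSI feeds two chips. */
static const struct kvm_irq_routing_entry gsi0_routes[] = {
	{ .gsi = 0, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC,     .pin = 0 } },
	{ .gsi = 0, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_PIC_MASTER, .pin = 0 } },
};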
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 751ece6a595c..3be43424818b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -468,9 +468,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err_no_disable;
 
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
-#endif
 #ifdef CONFIG_HAVE_KVM_IRQFD
 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
