author     Linus Torvalds <torvalds@linux-foundation.org>   2015-10-23 05:32:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-10-23 05:32:29 -0400
commit     2c44f4f03574a3754306b5a8a80b9ed38a795104
tree       31d132ab5fac5323c1da94d72f8c89f0b497204a
parent     8a990fb47b4c93653745512eca7d2d210678e6c1
parent     ad355e383d826e3506c3caaa0fe991fd112de47b
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM bugfixes from Paolo Bonzini:
"Bug fixes for ARM, mostly 4.3 regressions related to virtual interrupt
controller changes"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
arm/arm64: KVM: Fix disabled distributor operation
arm/arm64: KVM: Clear map->active on pend/active clear
arm/arm64: KVM: Fix arch timer behavior for disabled interrupts
KVM: arm: use GIC support unconditionally
KVM: arm/arm64: Fix memory leak if timer initialization fails
KVM: arm/arm64: Do not inject spurious interrupts
-rw-r--r--  arch/arm/kvm/Kconfig       |  1
-rw-r--r--  arch/arm/kvm/arm.c         |  2
-rw-r--r--  virt/kvm/arm/arch_timer.c  | 19
-rw-r--r--  virt/kvm/arm/vgic.c        | 95
4 files changed, 76 insertions, 41 deletions
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 210eccadb69a..356970f3b25e 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
 	depends on MMU && OF
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select ARM_GIC
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
 	select KVM_MMIO
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dc017adfddc8..78b286994577 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1080,7 +1080,7 @@ static int init_hyp_mode(void)
 	 */
 	err = kvm_timer_hyp_init();
 	if (err)
-		goto out_free_mappings;
+		goto out_free_context;
 
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();
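
The arm.c hunk above only retargets a goto, but the point of the leak fix is the usual error-unwind ladder: a failure must jump to the label that undoes everything allocated before it, and jumping one label too far down the ladder skips a cleanup step and leaks it. A minimal, self-contained sketch of that idiom follows; the helper names are hypothetical and are not the real init_hyp_mode() steps.

/* Hypothetical stubs standing in for two allocation steps and one init step. */
static int  alloc_first(void)  { return 0; }
static int  alloc_second(void) { return 0; }
static int  init_third(void)   { return 0; }
static void free_second(void)  { }
static void free_first(void)   { }

static int init_example(void)
{
	int err;

	err = alloc_first();
	if (err)
		return err;

	err = alloc_second();
	if (err)
		goto out_free_first;

	err = init_third();
	if (err)
		goto out_free_second;	/* jumping to out_free_first here would leak the second allocation */

	return 0;

out_free_second:
	free_second();
out_free_first:
	free_first();
	return err;
}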
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 48c6e1ac6827..b9d3a32cbc04 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	bool phys_active;
+	int ret;
 
 	/*
 	 * We're about to run this vcpu again, so there is no need to
@@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	 */
 	if (kvm_timer_should_fire(vcpu))
 		kvm_timer_inject_irq(vcpu);
+
+	/*
+	 * We keep track of whether the edge-triggered interrupt has been
+	 * signalled to the vgic/guest, and if so, we mask the interrupt and
+	 * the physical distributor to prevent the timer from raising a
+	 * physical interrupt whenever we run a guest, preventing forward
+	 * VCPU progress.
+	 */
+	if (kvm_vgic_get_phys_irq_active(timer->map))
+		phys_active = true;
+	else
+		phys_active = false;
+
+	ret = irq_set_irqchip_state(timer->map->irq,
+				    IRQCHIP_STATE_ACTIVE,
+				    phys_active);
+	WARN_ON(ret);
 }
 
 /**
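
The timer change above, and the vgic.c changes below, rely on the generic irqchip-state helpers irq_get_irqchip_state()/irq_set_irqchip_state() to propagate a virtual interrupt's active state to the physical line. A minimal sketch of how those helpers are typically called is below; sync_phys_active() and its parameters are illustrative only and are not part of the patch.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bug.h>

/*
 * Illustrative helper (not from the patch): make the physical interrupt's
 * ACTIVE state follow the state the virtual interrupt has on the vgic side.
 */
static void sync_phys_active(unsigned int host_irq, bool virt_active)
{
	bool phys_active;
	int ret;

	/* Read the current ACTIVE state of the physical line. */
	ret = irq_get_irqchip_state(host_irq, IRQCHIP_STATE_ACTIVE,
				    &phys_active);
	WARN_ON(ret);

	if (phys_active == virt_active)
		return;

	/* Force the physical ACTIVE state to match the virtual one. */
	ret = irq_set_irqchip_state(host_irq, IRQCHIP_STATE_ACTIVE,
				    virt_active);
	WARN_ON(ret);
}

Setting the physical line active before entering the guest keeps the hardware timer from immediately re-firing on the host while the guest still considers the interrupt to be in service.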
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 6bd1c9bf7ae7..66c66165e712 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
 	return false;
 }
 
+/*
+ * If a mapped interrupt's state has been modified by the guest such that it
+ * is no longer active or pending, without it have gone through the sync path,
+ * then the map->active field must be cleared so the interrupt can be taken
+ * again.
+ */
+static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct list_head *root;
+	struct irq_phys_map_entry *entry;
+	struct irq_phys_map *map;
+
+	rcu_read_lock();
+
+	/* Check for PPIs */
+	root = &vgic_cpu->irq_phys_map_list;
+	list_for_each_entry_rcu(entry, root, entry) {
+		map = &entry->map;
+
+		if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
+		    !vgic_irq_is_active(vcpu, map->virt_irq))
+			map->active = false;
+	}
+
+	rcu_read_unlock();
+}
+
 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 				   struct kvm_exit_mmio *mmio,
 				   phys_addr_t offset, int vcpu_id)
@@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 					  vcpu_id, offset);
 		vgic_reg_access(mmio, reg, offset, mode);
 
+		vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
 		vgic_update_state(kvm);
 		return true;
 	}
@@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
 			    ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
 
 	if (mmio->is_write) {
+		vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
 		vgic_update_state(kvm);
 		return true;
 	}
@@ -982,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
 	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
 
+	if (!dist->enabled) {
+		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
+		bitmap_zero(pend_shared, nr_shared);
+		return 0;
+	}
+
 	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
 	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@ -1009,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int c;
 
-	if (!dist->enabled) {
-		set_bit(0, dist->irq_pending_on_cpu);
-		return;
-	}
-
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		if (compute_pending_for_cpu(vcpu))
 			set_bit(c, dist->irq_pending_on_cpu);
@@ -1092,6 +1123,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
+	/*
+	 * We must transfer the pending state back to the distributor before
+	 * retiring the LR, otherwise we may loose edge-triggered interrupts.
+	 */
+	if (vlr.state & LR_STATE_PENDING) {
+		vgic_dist_irq_set_pending(vcpu, irq);
+		vlr.hwirq = 0;
+	}
+
 	vlr.state = 0;
 	vgic_set_lr(vcpu, lr_nr, vlr);
 	clear_bit(lr_nr, vgic_cpu->lr_used);
@@ -1132,7 +1172,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
 		vgic_irq_clear_active(vcpu, irq);
 		vgic_update_state(vcpu->kvm);
-	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
+	} else {
+		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
 		vlr.state |= LR_STATE_PENDING;
 		kvm_debug("Set pending: 0x%x\n", vlr.state);
 	}
@@ -1240,7 +1281,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	unsigned long *pa_percpu, *pa_shared;
-	int i, vcpu_id, lr, ret;
+	int i, vcpu_id;
 	int overflow = 0;
 	int nr_shared = vgic_nr_shared_irqs(dist);
 
@@ -1295,31 +1336,6 @@ epilog:
 		 */
 		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
 	}
-
-	for (lr = 0; lr < vgic->nr_lr; lr++) {
-		struct vgic_lr vlr;
-
-		if (!test_bit(lr, vgic_cpu->lr_used))
-			continue;
-
-		vlr = vgic_get_lr(vcpu, lr);
-
-		/*
-		 * If we have a mapping, and the virtual interrupt is
-		 * presented to the guest (as pending or active), then we must
-		 * set the state to active in the physical world. See
-		 * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
-		 */
-		if (vlr.state & LR_HW) {
-			struct irq_phys_map *map;
-			map = vgic_irq_map_search(vcpu, vlr.irq);
-
-			ret = irq_set_irqchip_state(map->irq,
-						    IRQCHIP_STATE_ACTIVE,
-						    true);
-			WARN_ON(ret);
-		}
-	}
 }
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1421,7 +1437,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
 		return 0;
 
 	map = vgic_irq_map_search(vcpu, vlr.irq);
-	BUG_ON(!map || !map->active);
+	BUG_ON(!map);
 
 	ret = irq_get_irqchip_state(map->irq,
 				    IRQCHIP_STATE_ACTIVE,
@@ -1429,13 +1445,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
 
 	WARN_ON(ret);
 
-	if (map->active) {
-		ret = irq_set_irqchip_state(map->irq,
-					    IRQCHIP_STATE_ACTIVE,
-					    false);
-		WARN_ON(ret);
+	if (map->active)
 		return 0;
-	}
 
 	return 1;
 }
@@ -1607,8 +1618,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 	} else {
 		if (level_triggered) {
 			vgic_dist_irq_clear_level(vcpu, irq_num);
-			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
+			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
 				vgic_dist_irq_clear_pending(vcpu, irq_num);
+				vgic_cpu_irq_clear(vcpu, irq_num);
+				if (!compute_pending_for_cpu(vcpu))
+					clear_bit(cpuid, dist->irq_pending_on_cpu);
+			}
 		}
 
 		ret = false;
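
Several of the vgic.c hunks converge on the same bookkeeping rule: a VCPU's bit in the pending-on-cpu mask may only be set while the distributor is enabled and (pending & enabled) is non-empty, and it must be cleared as soon as that stops being true. A small, self-contained sketch of that computation follows, using the kernel bitmap helpers; the function and constant names are illustrative, not the vgic's own.

#include <linux/bitmap.h>

#define EXAMPLE_NR_IRQS 64	/* illustrative size, not VGIC_NR_PRIVATE_IRQS */

/*
 * Illustrative helper: recompute whether a VCPU has anything deliverable.
 * With the distributor disabled nothing may be forwarded, so the scratch
 * bitmap is cleared and the caller should clear this VCPU's pending bit.
 */
static bool cpu_has_pending(const unsigned long *pending,
			    const unsigned long *enabled,
			    bool dist_enabled,
			    unsigned long *scratch)
{
	if (!dist_enabled) {
		bitmap_zero(scratch, EXAMPLE_NR_IRQS);
		return false;
	}

	bitmap_and(scratch, pending, enabled, EXAMPLE_NR_IRQS);
	return !bitmap_empty(scratch, EXAMPLE_NR_IRQS);
}

The same rule is why the vgic_update_irq_pending() hunk clears the VCPU's bit in irq_pending_on_cpu once the last pending source for that VCPU goes away.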