author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2018-03-20 06:27:18 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2018-03-20 06:27:18 -0400
commit     4958134df54c2c84e9c22ea042761d439164d26e (patch)
tree       503177afab11f7d25b12a84ce25b481d305c51ba /virt
parent     c4f528795d1add8b63652673f7262729f679c6c1 (diff)
parent     c698ca5278934c0ae32297a8725ced2e27585d7f (diff)
Merge 4.16-rc6 into tty-next
We want the serial/tty fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'virt')
 -rw-r--r--  virt/kvm/arm/arch_timer.c     | 122
 -rw-r--r--  virt/kvm/arm/arm.c            |   9
 -rw-r--r--  virt/kvm/arm/hyp/vgic-v3-sr.c |   3
 -rw-r--r--  virt/kvm/arm/mmu.c            |   6
 -rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c |   3
 -rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c   |  11
 -rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c   |   9
 -rw-r--r--  virt/kvm/arm/vgic/vgic.c      |  87
 -rw-r--r--  virt/kvm/arm/vgic/vgic.h      |   3
 -rw-r--r--  virt/kvm/kvm_main.c           |   3
10 files changed, 171 insertions, 85 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 70268c0bec79..282389eb204f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq = 30,
 	.level = 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 	cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress. Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);
 
-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);
 
 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);
 
-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 		phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;
 
-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
-
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
 
-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress. Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);
 
 	set_cntvoff(vtimer->cntvoff);
 
@@ -555,22 +559,29 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
@@ -584,6 +595,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	ptimer->cnt_ctl = 0;
 	kvm_timer_update_state(vcpu);
 
+	if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
+		kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+
 	return 0;
 }
 
@@ -753,9 +767,11 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}
 
-	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
+	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
 
 	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
 			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 86941f6181bb..53572304843b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -384,14 +384,11 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	vcpu_load(vcpu);
-
 	if (vcpu->arch.power_off)
 		mp_state->mp_state = KVM_MP_STATE_STOPPED;
 	else
 		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
 
-	vcpu_put(vcpu);
 	return 0;
 }
 
@@ -400,8 +397,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
-	vcpu_load(vcpu);
-
 	switch (mp_state->mp_state) {
 	case KVM_MP_STATE_RUNNABLE:
 		vcpu->arch.power_off = false;
@@ -413,7 +408,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 		ret = -EINVAL;
 	}
 
-	vcpu_put(vcpu);
 	return ret;
 }
 
@@ -1036,8 +1030,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	struct kvm_device_attr attr;
 	long r;
 
-	vcpu_load(vcpu);
-
 	switch (ioctl) {
 	case KVM_ARM_VCPU_INIT: {
 		struct kvm_vcpu_init init;
@@ -1114,7 +1106,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EINVAL;
 	}
 
-	vcpu_put(vcpu);
 	return r;
 }
 
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index f5c3d6d7019e..b89ce5432214 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * are now visible to the system register interface.
 	 */
 	if (!cpu_if->vgic_sre) {
-		dsb(st);
+		dsb(sy);
+		isb();
 		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
 	}
 
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ec62d1cccab7..b960acdd0c05 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1810,9 +1810,9 @@ int kvm_mmu_init(void)
 	 */
 	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
-	kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
-	kvm_info("HYP VA range: %lx:%lx\n",
-		 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
+	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
+	kvm_debug("HYP VA range: %lx:%lx\n",
+		  kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
 	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
 	    hyp_idmap_start < kern_hyp_va(~0UL) &&
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 83d82bd7dc4e..dbe99d635c80 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	/* Loop over all IRQs affected by this read */
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		unsigned long flags;
 
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq_is_pending(irq))
 			value |= (1U << i);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index c32d7b93ffd1..29556f71b691 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
 		vgic_v2_write_lr(i, 0);
 }
 
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	cpuif->vgic_hcr |= GICH_HCR_NPIE;
+}
+
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
@@ -410,7 +417,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 	kvm_vgic_global_state.type = VGIC_V2;
 	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
 
-	kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
 
 	return 0;
 out:
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6b329414e57a..0ff2006f3781 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -26,6 +26,13 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+	cpuif->vgic_hcr |= ICH_HCR_NPIE;
+}
+
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c7c5ef190afa..8201899126f6 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -495,6 +495,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 	return ret;
 }
 
+/**
+ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
+ * @vcpu: The VCPU pointer
+ * @vintid: The INTID of the interrupt
+ *
+ * Reset the active and pending states of a mapped interrupt. Kernel
+ * subsystems injecting mapped interrupts should reset their interrupt lines
+ * when we are doing a reset of the VM.
+ */
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
+{
+	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	unsigned long flags;
+
+	if (!irq->hw)
+		goto out;
+
+	spin_lock_irqsave(&irq->irq_lock, flags);
+	irq->active = false;
+	irq->pending_latch = false;
+	irq->line_level = false;
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
+out:
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
 	struct vgic_irq *irq;
@@ -684,22 +710,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 		vgic_v3_set_underflow(vcpu);
 }
 
+static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_set_npie(vcpu);
+	else
+		vgic_v3_set_npie(vcpu);
+}
+
 /* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
+				 bool *multi_sgi)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count = 0;
 
+	*multi_sgi = false;
+
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source)
-			count += hweight8(irq->source);
-		else
+		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
+			int w = hweight8(irq->source);
+
+			count += w;
+			*multi_sgi |= (w > 1);
+		} else {
 			count++;
+		}
 		spin_unlock(&irq->irq_lock);
 	}
 	return count;
@@ -710,28 +751,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
-	int count = 0;
+	int count;
+	bool npie = false;
+	bool multi_sgi;
+	u8 prio = 0xff;
 
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
-	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
+	count = compute_ap_list_depth(vcpu, &multi_sgi);
+	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
 		vgic_sort_ap_list(vcpu);
 
+	count = 0;
+
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
 
-		if (unlikely(vgic_target_oracle(irq) != vcpu))
-			goto next;
-
 		/*
-		 * If we get an SGI with multiple sources, try to get
-		 * them in all at once.
+		 * If we have multi-SGIs in the pipeline, we need to
+		 * guarantee that they are all seen before any IRQ of
+		 * lower priority. In that case, we need to filter out
+		 * these interrupts by exiting early. This is easy as
+		 * the AP list has been sorted already.
 		 */
-		do {
+		if (multi_sgi && irq->priority > prio) {
+			spin_unlock(&irq->irq_lock);
+			break;
+		}
+
+		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
-		} while (irq->source && count < kvm_vgic_global_state.nr_lr);
 
-next:
+			if (irq->source) {
+				npie = true;
+				prio = irq->priority;
+			}
+		}
+
 		spin_unlock(&irq->irq_lock);
 
 		if (count == kvm_vgic_global_state.nr_lr) {
@@ -742,6 +798,9 @@ next:
 		}
 	}
 
+	if (npie)
+		vgic_set_npie(vcpu);
+
 	vcpu->arch.vgic_cpu.used_lrs = count;
 
 	/* Nuke remaining LRs */
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 12c37b89f7a3..f5b8519e5546 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -96,6 +96,7 @@
 /* we only support 64 kB translation table page size */
 #define KVM_ITS_L1E_ADDR_MASK		GENMASK_ULL(51, 16)
 
+/* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
 	if (irq->config == VGIC_CONFIG_EDGE)
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 			 int offset, u32 *val);
@@ -188,6 +190,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4501e658e8d6..65dea3ffef68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -969,8 +969,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* Check for overlaps */
 	r = -EEXIST;
 	kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
-		if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-		    (slot->id == id))
+		if (slot->id == id)
 			continue;
 		if (!((base_gfn + npages <= slot->base_gfn) ||
 		      (base_gfn >= slot->base_gfn + slot->npages)))