author     Linus Torvalds <torvalds@linux-foundation.org>  2016-11-13 13:28:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-11-13 13:28:53 -0500
commit     e234832afb623fe5c7d1d5703d6619494d8d703f
tree       bc9b5572c3fab768a39d64bb0951d029e684fbca
parent     e861d890c02c1105d0c82e3904c67f0386352fe2
parent     05d36a7dff0b091803034a0d70b41af86aecbc8d
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
"ARM fixes. There are a couple pending x86 patches but they'll have to
wait for next week"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: arm/arm64: vgic: Kick VCPUs when queueing already pending IRQs
KVM: arm/arm64: vgic: Prevent access to invalid SPIs
arm/arm64: KVM: Perform local TLB invalidation when multiplexing vcpus on a single CPU
-rw-r--r--  arch/arm/include/asm/kvm_asm.h    |  1
-rw-r--r--  arch/arm/include/asm/kvm_host.h   |  3
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h    |  1
-rw-r--r--  arch/arm/kvm/arm.c                | 27
-rw-r--r--  arch/arm/kvm/hyp/tlb.c            | 15
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h  |  1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h |  3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h  |  2
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c          | 15
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c     | 41
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.h     | 14
-rw-r--r--  virt/kvm/arm/vgic/vgic.c          | 12

12 files changed, 112 insertions(+), 23 deletions(-)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64 vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	struct arch_timer_kvm timer;
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
 #define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f2ad58..19b5f5c1c0ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret = 0;
+	int ret, cpu;
 
 	if (type)
 		return -EINVAL;
 
+	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+	if (!kvm->arch.last_vcpu_ran)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
 	ret = kvm_alloc_stage2_pgd(kvm);
 	if (ret)
 		goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(kvm);
 out_fail_alloc:
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
 	return ret;
 }
 
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	int i;
 
+	free_percpu(kvm->arch.last_vcpu_ran);
+	kvm->arch.last_vcpu_ran = NULL;
+
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int *last_ran;
+
+	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+	/*
+	 * We might get preempted before the vCPU actually runs, but
+	 * over-invalidation doesn't affect correctness.
+	 */
+	if (*last_ran != vcpu->vcpu_id) {
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		*last_ran = vcpu->vcpu_id;
+	}
+
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
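The kvm_arch_vcpu_load() hunk is the heart of this fix: each physical CPU remembers the id of the last vCPU of this VM that ran on it, and a local TLB flush is issued whenever that id changes, because TLB entries are tagged by VMID only and two vCPUs of the same VM can cache conflicting stage-1 translations. A standalone sketch of the same bookkeeping (plain C, invented names, no kernel APIs) may make the flow easier to follow:

#include <stdio.h>

#define NR_CPUS 4

/* stand-in for the per-VM __percpu allocation in the patch */
struct vm {
        int last_vcpu_ran[NR_CPUS];
};

static void local_tlb_flush(int cpu)
{
        printf("cpu%d: flush local TLB for this VMID\n", cpu);
}

/* mirrors kvm_arch_vcpu_load(): called whenever a vCPU is scheduled in */
static void vcpu_load(struct vm *vm, int vcpu_id, int cpu)
{
        int *last_ran = &vm->last_vcpu_ran[cpu];

        /* a different vCPU of the same VM ran here last: its TLB
         * entries carry our VMID, so invalidate before running */
        if (*last_ran != vcpu_id) {
                local_tlb_flush(cpu);
                *last_ran = vcpu_id;
        }
}

int main(void)
{
        struct vm vm;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)     /* the -1 init above */
                vm.last_vcpu_ran[cpu] = -1;

        vcpu_load(&vm, 0, 0);   /* flushes: first vCPU ever on cpu0 */
        vcpu_load(&vm, 0, 0);   /* no flush: same vCPU again */
        vcpu_load(&vm, 1, 0);   /* flushes: vCPU switch on cpu0 */
        return 0;
}

Note the hedge in the kernel comment: being preempted between the flush and the actual run can only cause an extra flush, never a missed one.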
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 729652854f90..6d810af2d9fd 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	__kvm_tlb_flush_vmid(kvm);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, VTTBR);
+	isb();
+
+	write_sysreg(0, TLBIALL);
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, VTTBR);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);
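The double kern_hyp_va() on the first line of the new function is easy to misread: the vcpu argument arrives as a kernel VA that must be rebased into the HYP mapping before it can be dereferenced, and the ->kvm pointer found inside it is again a kernel VA. A toy model (invented base address, a flat array standing in for the doubly-mapped memory) shows why a single translation is not enough:

#include <assert.h>
#include <stdalign.h>
#include <stdint.h>

/* invented two-view layout: the same bytes appear at a "kernel" VA
 * and in the HYP view; only the backing array actually exists here */
#define KERN_BASE 0x40000000UL

static alignas(8) unsigned char mem[4096];

/* rebase a kernel VA so it can be dereferenced "at HYP" */
static void *kern_hyp_va(const void *kern_va)
{
        return mem + ((uintptr_t)kern_va - KERN_BASE);
}

struct kvm      { unsigned long vttbr; };
struct kvm_vcpu { struct kvm *kvm; };   /* stored as a kernel VA */

int main(void)
{
        struct kvm *kvm = (struct kvm *)mem;                    /* offset 0 */
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)(mem + 256); /* offset 256 */

        kvm->vttbr = 0xabcd;
        vcpu->kvm = (struct kvm *)KERN_BASE;    /* kernel VA of the kvm */

        /* the hyp function receives the vcpu's *kernel* VA... */
        struct kvm_vcpu *arg = (struct kvm_vcpu *)(KERN_BASE + 256);

        /* ...so one translation reaches the vcpu, and the pointer it
         * holds needs a second translation before it can be used */
        struct kvm_vcpu *v = kern_hyp_va(arg);
        struct kvm *k = kern_hyp_va(v->kvm);

        assert(k->vttbr == 0xabcd);
        return 0;
}

Also worth noting: dsb(nsh) is a non-shareable barrier, i.e. the invalidation is only guaranteed complete on this CPU, which is exactly the scope the fix needs.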
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 18f746551bf6..ec3553eb9349 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bd94e6766759..e5050388e062 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -62,6 +62,9 @@ struct kvm_arch {
 	/* VTTBR value associated with above pgd and vmid */
 	u64 vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a79b969c26fc..6f72fe8b0e3e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 	return v;
 }
 
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
 /*
  * We currently only support a 40bit IPA.
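The extra parentheses are not cosmetic: a cast binds less tightly than the postfix -> operator, so with the old macro an expression like kern_hyp_va(vcpu)->kvm (exactly the pattern the new TLB code relies on) would apply ->kvm to the raw unsigned long before the cast and fail to compile. A minimal demonstration, with stub types and an identity translation (GNU C, like the kernel, for typeof):

#include <stdio.h>

struct kvm      { int id; };
struct kvm_vcpu { struct kvm *kvm; };

/* identity stub; the real helper masks/offsets the address */
static unsigned long __kern_hyp_va(unsigned long v) { return v; }

/* the old form, (typeof(v))(__kern_hyp_va(...)), would parse
 * kern_hyp_va(vp)->kvm as (typeof(vp))((__kern_hyp_va(...))->kvm),
 * i.e. -> applied to an unsigned long: a compile error */
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))

int main(void)
{
        struct kvm k = { .id = 42 };
        struct kvm_vcpu vcpu = { .kvm = &k };
        struct kvm_vcpu *vp = &vcpu;

        printf("%d\n", kern_hyp_va(vp)->kvm->id);       /* prints 42 */
        return 0;
}

That is presumably why this one-line fix rides along with the new __kvm_tlb_flush_local_vmid() code, which dereferences kern_hyp_va(vcpu) directly.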
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9cc0ea784ae6..88e2f2b938f0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, vttbr_el2);
 }
 
+void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+
+	/* Switch to requested VMID */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+
+	asm volatile("tlbi vmalle1" : : );
+	dsb(nsh);
+	isb();
+
+	write_sysreg(0, vttbr_el2);
+}
+
 void __hyp_text __kvm_flush_vm_context(void)
 {
 	dsb(ishst);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
 	return container_of(dev, struct vgic_io_device, dev);
 }
 
-static bool check_region(const struct vgic_register_region *region,
+static bool check_region(const struct kvm *kvm,
+			 const struct vgic_register_region *region,
 			 gpa_t addr, int len)
 {
-	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
-		return true;
-	if ((region->access_flags & VGIC_ACCESS_32bit) &&
-	    len == sizeof(u32) && !(addr & 3))
-		return true;
-	if ((region->access_flags & VGIC_ACCESS_64bit) &&
-	    len == sizeof(u64) && !(addr & 7))
-		return true;
+	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+
+	switch (len) {
+	case sizeof(u8):
+		flags = VGIC_ACCESS_8bit;
+		break;
+	case sizeof(u32):
+		flags = VGIC_ACCESS_32bit;
+		break;
+	case sizeof(u64):
+		flags = VGIC_ACCESS_64bit;
+		break;
+	default:
+		return false;
+	}
+
+	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+		if (!region->bits_per_irq)
+			return true;
+
+		/* Do we access a non-allocated IRQ? */
+		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+	}
 
 	return false;
 }
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 
 	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
 				       addr - iodev->base_addr);
-	if (!region || !check_region(region, addr, len)) {
+	if (!region || !check_region(vcpu->kvm, region, addr, len)) {
 		memset(val, 0, len);
 		return 0;
 	}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 
 	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
 				       addr - iodev->base_addr);
-	if (!region)
-		return 0;
-
-	if (!check_region(region, addr, len))
+	if (!region || !check_region(vcpu->kvm, region, addr, len))
 		return 0;
 
 	switch (iodev->iodev_type) {
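The rewritten check_region() folds three checks into one path: the access size must be one the region supports, the address must be naturally aligned, and for per-IRQ registers the decoded INT ID must refer to an allocated IRQ. A compiling toy model of the same decision logic (simplified types, no kernel structs, made-up register values) can be exercised directly:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VGIC_ACCESS_8bit        1
#define VGIC_ACCESS_32bit       2
#define VGIC_ACCESS_64bit       4
#define VGIC_NR_PRIVATE_IRQS    32
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

struct region {
        unsigned access_flags;
        unsigned bits_per_irq;  /* 0 for registers that are not per-IRQ */
};

/* addr -> INT ID, as in VGIC_ADDR_TO_INTID (mask omitted for brevity) */
static unsigned addr_to_intid(uint64_t addr, unsigned bits)
{
        return addr * 8 / bits;
}

static bool check_region(unsigned nr_spis, const struct region *region,
                         uint64_t addr, int len)
{
        unsigned flags, nr_irqs = nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case 1: flags = VGIC_ACCESS_8bit;  break;
        case 4: flags = VGIC_ACCESS_32bit; break;
        case 8: flags = VGIC_ACCESS_64bit; break;
        default: return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;
                /* reject accesses that decode to a non-allocated IRQ */
                return addr_to_intid(addr, region->bits_per_irq) < nr_irqs;
        }
        return false;
}

int main(void)
{
        /* priority-style region: 8 bits per IRQ, byte or word access */
        struct region prio = { VGIC_ACCESS_8bit | VGIC_ACCESS_32bit, 8 };

        printf("%d\n", check_region(32, &prio, 0x20, 4)); /* 1: INT ID 32 < 64 */
        printf("%d\n", check_region(32, &prio, 0x21, 4)); /* 0: misaligned */
        printf("%d\n", check_region(32, &prio, 0x80, 4)); /* 0: INT ID 128 >= 64 */
        return 0;
}

The third case is what the patch title means by "invalid SPIs": a well-formed access that decodes to an IRQ number beyond what the VM was given.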
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
 #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
 
 /*
- * (addr & mask) gives us the byte offset for the INT ID, so we want to
- * divide this with 'bytes per irq' to get the INT ID, which is given
- * by '(bits) / 8'.  But we do this with fixed-point-arithmetic and
- * take advantage of the fact that division by a fraction equals
- * multiplication with the inverted fraction, and scale up both the
- * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ * (addr & mask) gives us the _byte_ offset for the INT ID.
+ * We multiply this by 8 to get the _bit_ offset, then divide this by
+ * the number of bits to learn the actual INT ID.
+ * But instead of a division (which requires a "long long div" implementation),
+ * we shift by the binary logarithm of <bits>.
+ * This assumes that <bits> is a power of two.
  */
 #define VGIC_ADDR_TO_INTID(addr, bits)	(((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
-					64 / (bits) / 8)
+					8 >> ilog2(bits))
 
 /*
  * Some VGIC registers store per-IRQ information, with a different number
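The new comment compresses a subtle refactor: multiplying by 64/(bits)/8 and shifting right by ilog2(bits) after multiplying by 8 compute the same INT ID whenever bits is a power of two, but the shift avoids a 64-bit division (addr is a gpa_t) in code that must also build on 32-bit ARM. A standalone check, with an open-coded ilog2() and GIC register layouts used as examples, confirms the equivalence:

#include <assert.h>

#define VGIC_ADDR_IRQ_MASK(bits)  (((bits) * 1024 / 8) - 1)
#define OLD_INTID(addr, bits)     (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
                                   64 / (bits) / 8)
#define NEW_INTID(addr, bits)     (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
                                   8 >> ilog2(bits))

/* in the kernel this comes from <linux/log2.h> */
static int ilog2(unsigned long n)
{
        int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned long addr;

        /* GICD_ISENABLER, 1 bit per IRQ: byte 4 covers INT IDs 32..63 */
        assert(NEW_INTID(0x4, 1) == 32);
        /* GICD_IPRIORITYR, 8 bits per IRQ: byte offset 0x20 is INT ID 32 */
        assert(NEW_INTID(0x20, 8) == 32);

        /* old and new forms agree for power-of-two bits-per-IRQ */
        for (addr = 0; addr < 0x100; addr++) {
                assert(OLD_INTID(addr, 1) == NEW_INTID(addr, 1));
                assert(OLD_INTID(addr, 2) == NEW_INTID(addr, 2));
                assert(OLD_INTID(addr, 8) == NEW_INTID(addr, 8));
        }
        return 0;
}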
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
 		 * no more work for us to do.
 		 */
 		spin_unlock(&irq->irq_lock);
+
+		/*
+		 * We have to kick the VCPU here, because we could be
+		 * queueing an edge-triggered interrupt for which we
+		 * get no EOI maintenance interrupt. In that case,
+		 * while the IRQ is already on the VCPU's AP list, the
+		 * VCPU could have EOI'ed the original interrupt and
+		 * won't see this one until it exits for some other
+		 * reason.
+		 */
+		if (vcpu)
+			kvm_vcpu_kick(vcpu);
 		return false;
 	}
 