author     Paolo Bonzini <pbonzini@redhat.com>  2014-12-15 07:06:40 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-12-15 07:06:40 -0500
commit     333bce5aac9e8cb7f6b27e0122a224d17be4dd5d (patch)
tree       7a171a51d9a999f90dd6f17f2b7dd0997dbc1a38
parent     ab646f54f4fd1a8b9671b8707f0739fdd28ce2b1 (diff)
parent     05971120fca43e0357789a14b3386bb56eef2201 (diff)
Merge tag 'kvm-arm-for-3.19-take2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
Second round of changes for KVM for arm/arm64 for v3.19; fixes reboot
problems, clarifies VCPU init, and fixes a regression concerning the
VGIC init flow.
Conflicts:
arch/ia64/kvm/kvm-ia64.c [deleted in HEAD and modified in kvmarm]
 Documentation/virtual/kvm/api.txt    |  17
 arch/arm/include/asm/kvm_emulate.h   |   5
 arch/arm/include/asm/kvm_host.h      |   2
 arch/arm/include/asm/kvm_mmu.h       |   6
 arch/arm/kvm/arm.c                   |  78
 arch/arm/kvm/guest.c                 |  26
 arch/arm/kvm/mmio.c                  |  15
 arch/arm/kvm/mmu.c                   |  99
 arch/arm/kvm/psci.c                  |  18
 arch/arm64/include/asm/kvm_emulate.h |   5
 arch/arm64/include/asm/kvm_host.h    |   3
 arch/arm64/include/asm/kvm_mmu.h     |   6
 arch/arm64/kvm/guest.c               |  26
 arch/x86/kvm/mmu.c                   |   6
 include/kvm/arm_arch_timer.h         |  10
 include/kvm/arm_vgic.h               |  12
 include/linux/kvm_host.h             |   3
 virt/kvm/arm/arch_timer.c            |  30
 virt/kvm/arm/vgic.c                  | 116
 virt/kvm/kvm_main.c                  |  16
 20 files changed, 335 insertions(+), 164 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 80bfe59fc992..0007fef4ed81 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2478,9 +2478,15 @@ return ENOEXEC for that vcpu.
 Note that because some registers reflect machine topology, all vcpus
 should be created before this ioctl is invoked.
 
+Userspace can call this function multiple times for a given vcpu, including
+after the vcpu has been run. This will reset the vcpu to its initial
+state. All calls to this function after the initial call must use the same
+target and same set of feature flags, otherwise EINVAL will be returned.
+
 Possible features:
 	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
-	  Depends on KVM_CAP_ARM_PSCI.
+	  Depends on KVM_CAP_ARM_PSCI.  If not set, the CPU will be powered on
+	  and execute guest code when KVM_RUN is called.
	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
@@ -2976,6 +2982,15 @@ HVC instruction based PSCI call from the vcpu. The 'type' field describes
 the system-level event type. The 'flags' field describes architecture
 specific flags for the system-level event.
 
+Valid values for 'type' are:
+  KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
+   VM. Userspace is not obliged to honour this, and if it does honour
+   this, it does not need to destroy the VM synchronously (ie it may call
+   KVM_RUN again before shutdown finally occurs).
+  KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
+   As with SHUTDOWN, userspace can choose to ignore the request, or
+   to schedule the reset to occur in the future and may call KVM_RUN again.
+
 	/* Fix the size of the union. */
 	char padding[256];
 };
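The ABI text above deliberately leaves shutdown/reset policy to userspace. As a rough illustration only (not part of this patch; the reinit_all_vcpus() helper is hypothetical), a VMM's run loop might react to the two event types like this:

#include <stdbool.h>
#include <linux/kvm.h>

extern void reinit_all_vcpus(void);	/* hypothetical: re-runs
					 * KVM_ARM_VCPU_INIT on every vcpu */

/* Returns true if KVM_RUN may be called again on this vcpu.
 * 'run' is the mmap'ed struct kvm_run for the vcpu. */
static bool handle_system_event(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return true;		/* not covered by this sketch */

	switch (run->system_event.type) {
	case KVM_SYSTEM_EVENT_SHUTDOWN:
		return false;		/* stop running, tear the VM down */
	case KVM_SYSTEM_EVENT_RESET:
		reinit_all_vcpus();	/* reset vcpus, then KVM_RUN again */
		return true;
	}
	return true;
}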
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index b9db269c6e61..66ce17655bb9 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 53036e21756b..254e0650e48b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,8 +150,6 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index acb0d5712716..63e0ecc04901 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
 
+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -161,9 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 }
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size)
+					     unsigned long size,
+					     bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu))
+	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
 		kvm_flush_dcache_to_poc((void *)hva, size);
 
 	/*
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9e193c8a959e..2d6d91001062 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	int err;
 	struct kvm_vcpu *vcpu;
 
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!vcpu) {
 		err = -ENOMEM;
@@ -263,6 +268,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu->arch.target = -1;
+	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
@@ -419,6 +425,7 @@ static void update_vttbr(struct kvm *kvm)
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	int ret;
 
 	if (likely(vcpu->arch.has_run_once))
@@ -427,15 +434,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.has_run_once = true;
 
 	/*
-	 * Initialize the VGIC before running a vcpu the first time on
-	 * this VM.
+	 * Map the VGIC hardware resources before running a vcpu the first
+	 * time on this VM.
 	 */
-	if (unlikely(!vgic_initialized(vcpu->kvm))) {
-		ret = kvm_vgic_init(vcpu->kvm);
+	if (unlikely(!vgic_ready(kvm))) {
+		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * Enable the arch timers only if we have an in-kernel VGIC
+	 * and it has been properly initialized, since we cannot handle
+	 * interrupts from the virtual timer with a userspace gic.
+	 */
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		kvm_timer_enable(kvm);
+
 	return 0;
 }
 
@@ -649,6 +664,48 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 	return -EINVAL;
 }
 
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+			       const struct kvm_vcpu_init *init)
+{
+	unsigned int i;
+	int phys_target = kvm_target_cpu();
+
+	if (init->target != phys_target)
+		return -EINVAL;
+
+	/*
+	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+	 * use the same target.
+	 */
+	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+		return -EINVAL;
+
+	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+	for (i = 0; i < sizeof(init->features) * 8; i++) {
+		bool set = (init->features[i / 32] & (1 << (i % 32)));
+
+		if (set && i >= KVM_VCPU_MAX_FEATURES)
+			return -ENOENT;
+
+		/*
+		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+		 * use the same feature set.
+		 */
+		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+		    test_bit(i, vcpu->arch.features) != set)
+			return -EINVAL;
+
+		if (set)
+			set_bit(i, vcpu->arch.features);
+	}
+
+	vcpu->arch.target = phys_target;
+
+	/* Now we know what it is, we can reset it. */
+	return kvm_reset_vcpu(vcpu);
+}
+
+
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
 {
@@ -659,10 +716,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/*
+	 * Ensure a rebooted VM will fault in RAM pages and detect if the
+	 * guest MMU is turned off and flush the caches as needed.
+	 */
+	if (vcpu->arch.has_run_once)
+		stage2_unmap_vm(vcpu->kvm);
+
+	vcpu_reset_hcr(vcpu);
+
+	/*
 	 * Handle the "start in power-off" case by marking the VCPU as paused.
 	 */
-	if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
 		vcpu->arch.pause = true;
+	else
+		vcpu->arch.pause = false;
 
 	return 0;
 }
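With kvm_vcpu_set_target() now shared between arm and arm64 and callable more than once, the userspace side of the contract is a plain ioctl sequence. A minimal sketch, assuming vm_fd and vcpu_fd were obtained via KVM_CREATE_VM and KVM_CREATE_VCPU:

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* (Re)initialize one vcpu.  Repeat calls after the first must pass the
 * same target and feature set, or KVM returns -EINVAL (see above). */
static int vcpu_reinit(int vm_fd, int vcpu_fd, bool start_powered_off)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	/* Let the kernel pick the target matching the host CPU. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	if (start_powered_off)
		init.features[KVM_ARM_VCPU_POWER_OFF / 32] |=
			1u << (KVM_ARM_VCPU_POWER_OFF % 32);

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}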
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index cc0b78769bd8..384bab67c462 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr = HCR_GUEST_MASK;
 	return 0;
 }
 
@@ -274,31 +273,6 @@ int __attribute_const__ kvm_target_cpu(void)
 	}
 }
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init)
-{
-	unsigned int i;
-
-	/* We can only cope with guest==host and only on A15/A7 (for now). */
-	if (init->target != kvm_target_cpu())
-		return -EINVAL;
-
-	vcpu->arch.target = init->target;
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-	for (i = 0; i < sizeof(init->features) * 8; i++) {
-		if (test_bit(i, (void *)init->features)) {
-			if (i >= KVM_VCPU_MAX_FEATURES)
-				return -ENOENT;
-			set_bit(i, vcpu->arch.features);
-		}
-	}
-
-	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
-
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	int target = kvm_target_cpu();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 4cb5a93182e9..5d3bfc0eb3f0 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -187,15 +187,18 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	}
 
 	rt = vcpu->arch.mmio_decode.rt;
-	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
 
-	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
-		       KVM_TRACE_MMIO_READ_UNSATISFIED,
-		       mmio.len, fault_ipa,
-		       (mmio.is_write) ? data : 0);
+	if (mmio.is_write) {
+		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
+					       mmio.len);
 
-	if (mmio.is_write)
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
+			       fault_ipa, data);
 		mmio_write_buf(mmio.data, mmio.len, data);
+	} else {
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+			       fault_ipa, 0);
+	}
 
 	if (vgic_handle_mmio(vcpu, run, &mmio))
 		return 1;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 57a403a5c22b..3756dd3e85c2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -611,6 +611,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
+static void stage2_unmap_memslot(struct kvm *kvm,
+				 struct kvm_memory_slot *memslot)
+{
+	hva_t hva = memslot->userspace_addr;
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = PAGE_SIZE * memslot->npages;
+	hva_t reg_end = hva + size;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we should
+	 * unmap any of them.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 *     |               memory region                |
+	 *     +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (!(vma->vm_flags & VM_PFNMAP)) {
+			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		stage2_unmap_memslot(kvm, memslot);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm: The KVM struct pointer for the VM.
@@ -834,6 +899,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -847,6 +917,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct vm_area_struct *vma;
 	pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
+	bool fault_ipa_uncached;
 
 	write_fault = kvm_is_write_fault(vcpu);
 	if (fault_status == FSC_PERM && !write_fault) {
@@ -904,7 +975,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_pfn(pfn))
 		return -EFAULT;
 
-	if (kvm_is_mmio_pfn(pfn))
+	if (kvm_is_device_pfn(pfn))
 		mem_type = PAGE_S2_DEVICE;
 
 	spin_lock(&kvm->mmu_lock);
@@ -913,6 +984,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!hugetlb && !force_pte)
 		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
+	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
+
 	if (hugetlb) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
@@ -920,7 +993,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -928,7 +1002,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1288,11 +1363,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		hva = vm_end;
 	} while (hva < reg_end);
 
-	if (ret) {
-		spin_lock(&kvm->mmu_lock);
+	spin_lock(&kvm->mmu_lock);
+	if (ret)
 		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-		spin_unlock(&kvm->mmu_lock);
-	}
+	else
+		stage2_flush_memslot(kvm, memslot);
+	spin_unlock(&kvm->mmu_lock);
 	return ret;
 }
 
@@ -1304,6 +1380,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
+	/*
+	 * Readonly memslots are not incoherent with the caches by definition,
+	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
+	 * that the guest may consider devices and hence map as uncached.
+	 * To prevent incoherency issues in these cases, tag all readonly
+	 * regions as incoherent.
+	 */
+	if (slot->flags & KVM_MEM_READONLY)
+		slot->flags |= KVM_MEMSLOT_INCOHERENT;
 	return 0;
 }
 
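The KVM_MEMSLOT_INCOHERENT tagging above is driven purely by how userspace registers the slot. A minimal sketch of registering a ROM-like read-only region (slot number, guest address and rom_buf are illustrative values):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int map_rom(int vm_fd, void *rom_buf, uint64_t rom_size)
{
	struct kvm_userspace_memory_region region = {
		.slot            = 1,
		.flags           = KVM_MEM_READONLY,	/* kernel now also tags
							 * the slot
							 * KVM_MEMSLOT_INCOHERENT */
		.guest_phys_addr = 0,
		.memory_size     = rom_size,
		.userspace_addr  = (uintptr_t)rom_buf,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}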
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 09cf37737ee2..58cb3248d277 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/preempt.h>
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
@@ -166,6 +167,23 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 
 static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
 {
+	int i;
+	struct kvm_vcpu *tmp;
+
+	/*
+	 * The KVM ABI specifies that a system event exit may call KVM_RUN
+	 * again and may perform shutdown/reboot at a later time than when the
+	 * actual request is made.  Since we are implementing PSCI and a
+	 * caller of PSCI reboot and shutdown expects that the system shuts
+	 * down or reboots immediately, let's make sure that VCPUs are not run
+	 * after this call is handled and before the VCPUs have been
+	 * re-initialized.
+	 */
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+		tmp->arch.pause = true;
+		kvm_vcpu_kick(tmp);
+	}
+
 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
 	vcpu->run->system_event.type = type;
 	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5674a55b5518..8127e45e2637 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2012c4ba8d67..0b7dfdb931df 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -165,8 +165,6 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
@@ -200,6 +198,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
 u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
 
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0caf7a59f6a1..14a74f136272 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -83,6 +83,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
 
+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -243,9 +244,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 }
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size)
+					     unsigned long size,
+					     bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu))
+	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
 		kvm_flush_dcache_to_poc((void *)hva, size);
 
 	if (!icache_is_aliasing()) {		/* PIPT */
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 76794692c20b..9535bd555d1d 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
 	return 0;
 }
 
@@ -297,31 +296,6 @@ int __attribute_const__ kvm_target_cpu(void)
 	return -EINVAL;
 }
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init)
-{
-	unsigned int i;
-	int phys_target = kvm_target_cpu();
-
-	if (init->target != phys_target)
-		return -EINVAL;
-
-	vcpu->arch.target = phys_target;
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-	for (i = 0; i < sizeof(init->features) * 8; i++) {
-		if (init->features[i / 32] & (1 << (i % 32))) {
-			if (i >= KVM_VCPU_MAX_FEATURES)
-				return -ENOENT;
-			set_bit(i, vcpu->arch.features);
-		}
-	}
-
-	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
-
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	int target = kvm_target_cpu();
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ea0dcb0b21b..10fbed126b11 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -629,7 +629,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	 * kvm mmu, before reclaiming the page, we should
 	 * unmap it from mmu first.
 	 */
-	WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
@@ -2460,7 +2460,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_mmio_pfn(pfn));
+			kvm_is_reserved_pfn(pfn));
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
@@ -2736,7 +2736,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
 	 * here.
 	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
 	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index ad9db6045b2f..b3f45a578344 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -60,7 +60,8 @@ struct arch_timer_cpu {
 
 #ifdef CONFIG_KVM_ARM_TIMER
 int kvm_timer_hyp_init(void);
-int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_enable(struct kvm *kvm);
+void kvm_timer_init(struct kvm *kvm);
 void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			  const struct kvm_irq_level *irq);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -77,11 +78,8 @@ static inline int kvm_timer_hyp_init(void)
 	return 0;
 };
 
-static inline int kvm_timer_init(struct kvm *kvm)
-{
-	return 0;
-}
-
+static inline void kvm_timer_enable(struct kvm *kvm) {}
+static inline void kvm_timer_init(struct kvm *kvm) {}
 static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 					const struct kvm_irq_level *irq) {}
 static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 206dcc3b3f7a..ac4888dc86bc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -274,7 +274,7 @@ struct kvm_exit_mmio;
 #ifdef CONFIG_KVM_ARM_VGIC
 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
 int kvm_vgic_hyp_init(void);
-int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_create(struct kvm *kvm);
 void kvm_vgic_destroy(struct kvm *kvm);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -287,7 +287,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		      struct kvm_exit_mmio *mmio);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
-#define vgic_initialized(k)	((k)->arch.vgic.ready)
+#define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
+#define vgic_ready(k)		((k)->arch.vgic.ready)
 
 int vgic_v2_probe(struct device_node *vgic_node,
 		  const struct vgic_ops **ops,
@@ -321,7 +322,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr,
 	return -ENXIO;
 }
 
-static inline int kvm_vgic_init(struct kvm *kvm)
+static inline int kvm_vgic_map_resources(struct kvm *kvm)
 {
 	return 0;
 }
@@ -373,6 +374,11 @@ static inline bool vgic_initialized(struct kvm *kvm)
 {
 	return true;
 }
+
+static inline bool vgic_ready(struct kvm *kvm)
+{
+	return true;
+}
 #endif
 
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 193bca68372d..26f106022c88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
  * include/linux/kvm_h.
  */
 #define KVM_MEMSLOT_INVALID	(1UL << 16)
+#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)
 
 /* Two fragments for cross MMIO pages. */
 #define KVM_MAX_MMIO_FRAGMENTS	2
@@ -712,7 +713,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819a9b6a..1c0772b340d8 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
 
 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
 {
+	int ret;
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
 	timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-			    timer->irq->irq,
-			    timer->irq->level);
+	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+				  timer->irq->irq,
+				  timer->irq->level);
+	WARN_ON(ret);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 	timer_disarm(timer);
 }
 
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
 {
-	if (timecounter && wqueue) {
-		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+	if (kvm->arch.timer.enabled)
+		return;
+
+	/*
+	 * There is a potential race here between VCPUs starting for the first
+	 * time, which may be enabling the timer multiple times.  That doesn't
+	 * hurt though, because we're just setting a variable to the same
+	 * variable that it already was.  The important thing is that all
+	 * VCPUs have the enabled variable set, before entering the guest, if
+	 * the arch timers are enabled.
+	 */
+	if (timecounter && wqueue)
 		kvm->arch.timer.enabled = 1;
-	}
+}
 
-	return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3aaca49de325..e373b76c5420 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
 #define ACCESS_WRITE_VALUE	(3 << 1)
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
+static int vgic_init(struct kvm *kvm);
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
@@ -1607,7 +1608,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 	}
 }
 
-static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 				  unsigned int irq_num, bool level)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1643,9 +1644,10 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 			vgic_dist_irq_clear_level(vcpu, irq_num);
 			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
 				vgic_dist_irq_clear_pending(vcpu, irq_num);
-		} else {
-			vgic_dist_irq_clear_pending(vcpu, irq_num);
 		}
+
+		ret = false;
+		goto out;
 	}
 
 	enabled = vgic_irq_is_enabled(vcpu, irq_num);
@@ -1672,7 +1674,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 out:
 	spin_unlock(&dist->lock);
 
-	return ret;
+	return ret ? cpuid : -EINVAL;
 }
 
 /**
@@ -1692,11 +1694,26 @@ out:
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 			bool level)
 {
-	if (likely(vgic_initialized(kvm)) &&
-	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
-		vgic_kick_vcpus(kvm);
+	int ret = 0;
+	int vcpu_id;
 
-	return 0;
+	if (unlikely(!vgic_initialized(kvm))) {
+		mutex_lock(&kvm->lock);
+		ret = vgic_init(kvm);
+		mutex_unlock(&kvm->lock);
+
+		if (ret)
+			goto out;
+	}
+
+	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+	if (vcpu_id >= 0) {
+		/* kick the specified vcpu */
+		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+	}
+
+out:
+	return ret;
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1726,39 +1743,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 
 	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
-	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
 	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
 
-	return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	int i;
-
-	for (i = 0; i < dist->nr_irqs; i++) {
-		if (i < VGIC_NR_PPIS)
-			vgic_bitmap_set_irq_val(&dist->irq_enabled,
-						vcpu->vcpu_id, i, 1);
-		if (i < VGIC_NR_PRIVATE_IRQS)
-			vgic_bitmap_set_irq_val(&dist->irq_cfg,
-						vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
-		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-	}
+	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
 
 	/*
 	 * Store the number of LRs per vcpu, so we don't have to go
@@ -1767,7 +1759,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 */
 	vgic_cpu->nr_lr = vgic->nr_lr;
 
-	vgic_enable(vcpu);
+	return 0;
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
@@ -1798,20 +1790,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	dist->irq_spi_cpu = NULL;
 	dist->irq_spi_target = NULL;
 	dist->irq_pending_on_cpu = NULL;
+	dist->nr_cpus = 0;
 }
 
 /*
  * Allocate and initialize the various data structures. Must be called
  * with kvm->lock held!
  */
-static int vgic_init_maps(struct kvm *kvm)
+static int vgic_init(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
 	int nr_cpus, nr_irqs;
-	int ret, i;
+	int ret, i, vcpu_id;
 
-	if (dist->nr_cpus)	/* Already allocated */
+	if (vgic_initialized(kvm))
 		return 0;
 
 	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
@@ -1859,16 +1852,28 @@ static int vgic_init_maps(struct kvm *kvm)
 	if (ret)
 		goto out;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
+	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+		vgic_set_target_reg(kvm, 0, i);
+
+	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
 		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
 		if (ret) {
 			kvm_err("VGIC: Failed to allocate vcpu memory\n");
 			break;
 		}
-	}
 
-	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
-		vgic_set_target_reg(kvm, 0, i);
+		for (i = 0; i < dist->nr_irqs; i++) {
+			if (i < VGIC_NR_PPIS)
+				vgic_bitmap_set_irq_val(&dist->irq_enabled,
+							vcpu->vcpu_id, i, 1);
+			if (i < VGIC_NR_PRIVATE_IRQS)
+				vgic_bitmap_set_irq_val(&dist->irq_cfg,
+							vcpu->vcpu_id, i,
+							VGIC_CFG_EDGE);
+		}
+
+		vgic_enable(vcpu);
+	}
 
 out:
 	if (ret)
@@ -1878,25 +1883,23 @@ out:
 }
 
 /**
- * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct
  *
  * Map the virtual CPU interface into the VM before running any VCPUs.  We
  * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space.  Also
- * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ * virtual CPU interface address in the guest physical address space.
  */
-int kvm_vgic_init(struct kvm *kvm)
+int kvm_vgic_map_resources(struct kvm *kvm)
 {
-	struct kvm_vcpu *vcpu;
-	int ret = 0, i;
+	int ret = 0;
 
 	if (!irqchip_in_kernel(kvm))
 		return 0;
 
 	mutex_lock(&kvm->lock);
 
-	if (vgic_initialized(kvm))
+	if (vgic_ready(kvm))
 		goto out;
 
 	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
@@ -1906,7 +1909,11 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	ret = vgic_init_maps(kvm);
+	/*
+	 * Initialize the vgic if this hasn't already been done on demand by
+	 * accessing the vgic state from userspace.
+	 */
+	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to allocate maps\n");
 		goto out;
@@ -1920,9 +1927,6 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_vgic_vcpu_init(vcpu);
-
 	kvm->arch.vgic.ready = true;
 out:
 	if (ret)
@@ -2167,7 +2171,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	ret = vgic_init_maps(dev->kvm);
+	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;
 
@@ -2289,7 +2293,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 
 		mutex_lock(&dev->kvm->lock);
 
-		if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
 			ret = -EBUSY;
 		else
 			dev->kvm->arch.vgic.nr_irqs = val;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c5c186af823b..f5283438ee05 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1301,7 +1301,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1407,7 +1407,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1436,7 +1436,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1457,7 +1457,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1467,14 +1467,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);