author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 19:05:28 -0500
commit    66dcff86ba40eebb5133cccf450878f2bba102ef (patch)
tree      e7eb49ad9316989a529b00303d2dd2cffa61a7f5 /arch/arm
parent    91ed9e8a32d9a76adc59c83f8b40024076cf8a02 (diff)
parent    2c4aa55a6af070262cca425745e8e54310e96b8d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM update from Paolo Bonzini:
 "3.19 changes for KVM:

   - spring cleaning: removed support for IA64, and for hardware-
     assisted virtualization on the PPC970

   - ARM, PPC, s390 all had only small fixes

  For x86:

   - small performance improvements (though only on weird guests)

   - usual round of hardware-compliancy fixes from Nadav

   - APICv fixes

   - XSAVES support for hosts and guests.  XSAVES hosts were broken
     because the (non-KVM) XSAVES patches inadvertently changed the KVM
     userspace ABI whenever XSAVES was enabled; hence, this part is
     going to stable.  Guest support is just a matter of exposing the
     feature and CPUID leaves support"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (179 commits)
  KVM: move APIC types to arch/x86/
  KVM: PPC: Book3S: Enable in-kernel XICS emulation by default
  KVM: PPC: Book3S HV: Improve H_CONFER implementation
  KVM: PPC: Book3S HV: Fix endianness of instruction obtained from HEIR register
  KVM: PPC: Book3S HV: Remove code for PPC970 processors
  KVM: PPC: Book3S HV: Tracepoints for KVM HV guest interactions
  KVM: PPC: Book3S HV: Simplify locking around stolen time calculations
  arch: powerpc: kvm: book3s_paired_singles.c: Remove unused function
  arch: powerpc: kvm: book3s_pr.c: Remove unused function
  arch: powerpc: kvm: book3s.c: Remove some unused functions
  arch: powerpc: kvm: book3s_32_mmu.c: Remove unused function
  KVM: PPC: Book3S HV: Check wait conditions before sleeping in kvmppc_vcore_blocked
  KVM: PPC: Book3S HV: ptes are big endian
  KVM: PPC: Book3S HV: Fix inaccuracies in ICP emulation for H_IPI
  KVM: PPC: Book3S HV: Fix KSM memory corruption
  KVM: PPC: Book3S HV: Fix an issue where guest is paused on receiving HMI
  KVM: PPC: Book3S HV: Fix computation of tlbie operand
  KVM: PPC: Book3S HV: Add missing HPTE unlock
  KVM: PPC: BookE: Improve irq inject tracepoint
  arm/arm64: KVM: Require in-kernel vgic for the arch timers
  ...
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h   5
-rw-r--r--  arch/arm/include/asm/kvm_host.h      2
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h       6
-rw-r--r--  arch/arm/kvm/arm.c                  78
-rw-r--r--  arch/arm/kvm/guest.c                26
-rw-r--r--  arch/arm/kvm/mmio.c                 15
-rw-r--r--  arch/arm/kvm/mmu.c                  92
-rw-r--r--  arch/arm/kvm/psci.c                 18
8 files changed, 195 insertions(+), 47 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index b9db269c6e61..66ce17655bb9 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 53036e21756b..254e0650e48b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -150,8 +150,6 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init);
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index acb0d5712716..63e0ecc04901 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -52,6 +52,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
 
+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -161,9 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 }
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size)
+					     unsigned long size,
+					     bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu))
+	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
 		kvm_flush_dcache_to_poc((void *)hva, size);
 
 	/*
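The new ipa_uncached parameter widens the flush condition: the page is now cleaned to the point of coherency either when the guest runs with its caches off or when the backing memslot has been tagged incoherent (see the mmu.c hunks further down). A self-contained sketch of just that predicate, with the kernel types stripped out so it compiles on its own; the names here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Models: !vcpu_has_cache_enabled(vcpu) || ipa_uncached */
    static bool need_flush_to_poc(bool guest_caches_on, bool ipa_uncached)
    {
            return !guest_caches_on || ipa_uncached;
    }

    int main(void)
    {
            printf("caches on,  coherent slot:   %d\n", need_flush_to_poc(true, false));  /* 0: no flush */
            printf("caches on,  incoherent slot: %d\n", need_flush_to_poc(true, true));   /* 1: flush */
            printf("caches off, coherent slot:   %d\n", need_flush_to_poc(false, false)); /* 1: flush */
            return 0;
    }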
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9e193c8a959e..2d6d91001062 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	int err;
 	struct kvm_vcpu *vcpu;
 
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!vcpu) {
 		err = -ENOMEM;
@@ -263,6 +268,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu->arch.target = -1;
+	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
@@ -419,6 +425,7 @@ static void update_vttbr(struct kvm *kvm)
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	int ret;
 
 	if (likely(vcpu->arch.has_run_once))
@@ -427,15 +434,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.has_run_once = true;
 
 	/*
-	 * Initialize the VGIC before running a vcpu the first time on
-	 * this VM.
+	 * Map the VGIC hardware resources before running a vcpu the first
+	 * time on this VM.
 	 */
-	if (unlikely(!vgic_initialized(vcpu->kvm))) {
-		ret = kvm_vgic_init(vcpu->kvm);
+	if (unlikely(!vgic_ready(kvm))) {
+		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * Enable the arch timers only if we have an in-kernel VGIC
+	 * and it has been properly initialized, since we cannot handle
+	 * interrupts from the virtual timer with a userspace gic.
+	 */
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		kvm_timer_enable(kvm);
+
 	return 0;
 }
 
@@ -649,6 +664,48 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 	return -EINVAL;
 }
 
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+			       const struct kvm_vcpu_init *init)
+{
+	unsigned int i;
+	int phys_target = kvm_target_cpu();
+
+	if (init->target != phys_target)
+		return -EINVAL;
+
+	/*
+	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+	 * use the same target.
+	 */
+	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+		return -EINVAL;
+
+	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+	for (i = 0; i < sizeof(init->features) * 8; i++) {
+		bool set = (init->features[i / 32] & (1 << (i % 32)));
+
+		if (set && i >= KVM_VCPU_MAX_FEATURES)
+			return -ENOENT;
+
+		/*
+		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+		 * use the same feature set.
+		 */
+		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+		    test_bit(i, vcpu->arch.features) != set)
+			return -EINVAL;
+
+		if (set)
+			set_bit(i, vcpu->arch.features);
+	}
+
+	vcpu->arch.target = phys_target;
+
+	/* Now we know what it is, we can reset it. */
+	return kvm_reset_vcpu(vcpu);
+}
+
+
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
 {
@@ -659,10 +716,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 		return ret;
 
 	/*
+	 * Ensure a rebooted VM will fault in RAM pages and detect if the
+	 * guest MMU is turned off and flush the caches as needed.
+	 */
+	if (vcpu->arch.has_run_once)
+		stage2_unmap_vm(vcpu->kvm);
+
+	vcpu_reset_hcr(vcpu);
+
+	/*
 	 * Handle the "start in power-off" case by marking the VCPU as paused.
 	 */
-	if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
 		vcpu->arch.pause = true;
+	else
+		vcpu->arch.pause = false;
 
 	return 0;
 }
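The features[] words validated by kvm_vcpu_set_target() above, including the KVM_ARM_VCPU_POWER_OFF bit this hunk tests, are the ones userspace hands in through the KVM_ARM_VCPU_INIT ioctl, indexed as features[i / 32], bit i % 32. As a hedged illustration (not part of this patch), a minimal userspace sequence that initializes a VCPU powered off might look like this; it assumes an ARM host where KVM_ARM_PREFERRED_TARGET is available and omits error reporting:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: create a VCPU on an existing VM fd and init it powered off.
     * vm_fd is assumed to come from KVM_CREATE_VM (not shown). */
    int init_vcpu_powered_off(int vm_fd)
    {
            struct kvm_vcpu_init init;
            int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);

            if (vcpu_fd < 0)
                    return -1;

            memset(&init, 0, sizeof(init));
            /* Ask KVM for this host's preferred target instead of hardcoding. */
            if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                    return -1;

            /* Feature bit i lives at features[i / 32], bit (i % 32) --
             * the same indexing kvm_vcpu_set_target() checks above. */
            init.features[KVM_ARM_VCPU_POWER_OFF / 32] |=
                    1 << (KVM_ARM_VCPU_POWER_OFF % 32);

            /* With this series, a later call with a different target or
             * feature set fails with -EINVAL; unknown bits give -ENOENT. */
            return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }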
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index cc0b78769bd8..384bab67c462 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr = HCR_GUEST_MASK;
 	return 0;
 }
 
@@ -274,31 +273,6 @@ int __attribute_const__ kvm_target_cpu(void)
 	}
 }
 
-int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			const struct kvm_vcpu_init *init)
-{
-	unsigned int i;
-
-	/* We can only cope with guest==host and only on A15/A7 (for now). */
-	if (init->target != kvm_target_cpu())
-		return -EINVAL;
-
-	vcpu->arch.target = init->target;
-	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-	for (i = 0; i < sizeof(init->features) * 8; i++) {
-		if (test_bit(i, (void *)init->features)) {
-			if (i >= KVM_VCPU_MAX_FEATURES)
-				return -ENOENT;
-			set_bit(i, vcpu->arch.features);
-		}
-	}
-
-	/* Now we know what it is, we can reset it. */
-	return kvm_reset_vcpu(vcpu);
-}
-
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	int target = kvm_target_cpu();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 4cb5a93182e9..5d3bfc0eb3f0 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -187,15 +187,18 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	}
 
 	rt = vcpu->arch.mmio_decode.rt;
-	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
 
-	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
-		       KVM_TRACE_MMIO_READ_UNSATISFIED,
-		       mmio.len, fault_ipa,
-		       (mmio.is_write) ? data : 0);
+	if (mmio.is_write) {
+		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
+					       mmio.len);
 
-	if (mmio.is_write)
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
+			       fault_ipa, data);
 		mmio_write_buf(mmio.data, mmio.len, data);
+	} else {
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+			       fault_ipa, 0);
+	}
 
 	if (vgic_handle_mmio(vcpu, run, &mmio))
 		return 1;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8664ff17cbbe..1dc9778a00af 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -612,6 +612,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
+static void stage2_unmap_memslot(struct kvm *kvm,
+				 struct kvm_memory_slot *memslot)
+{
+	hva_t hva = memslot->userspace_addr;
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = PAGE_SIZE * memslot->npages;
+	hva_t reg_end = hva + size;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we should
+	 * unmap any of them.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 *     |               memory region                |
+	 *     +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (!(vma->vm_flags & VM_PFNMAP)) {
+			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+}
+
+/**
+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void stage2_unmap_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		stage2_unmap_memslot(kvm, memslot);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 /**
  * kvm_free_stage2_pgd - free all stage-2 tables
  * @kvm: The KVM struct pointer for the VM.
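The gpa computed inside stage2_unmap_memslot() above is plain offset arithmetic: the distance of the VMA intersection's start from the slot's userspace base, re-applied to the slot's base IPA. A self-contained sketch with illustrative, made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hva_t; /* host virtual address */
    typedef uint64_t gpa_t; /* guest physical address (IPA) */

    /* Mirrors: gpa = addr + (vm_start - memslot->userspace_addr) */
    static gpa_t slot_hva_to_gpa(hva_t slot_hva, gpa_t slot_ipa, hva_t vm_start)
    {
            return slot_ipa + (vm_start - slot_hva);
    }

    int main(void)
    {
            hva_t slot_hva = 0x7f0000000000ULL; /* hypothetical slot base HVA */
            gpa_t slot_ipa = 0x80000000ULL;     /* hypothetical slot base IPA */

            /* A VMA intersection starting 1 MiB into the slot is unmapped
             * starting 1 MiB into the slot's IPA range: 0x80100000 here. */
            printf("gpa = %#llx\n", (unsigned long long)
                   slot_hva_to_gpa(slot_hva, slot_ipa, slot_hva + 0x100000));
            return 0;
    }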
@@ -853,6 +918,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct vm_area_struct *vma;
 	pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
+	bool fault_ipa_uncached;
 
 	write_fault = kvm_is_write_fault(vcpu);
 	if (fault_status == FSC_PERM && !write_fault) {
@@ -919,6 +985,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!hugetlb && !force_pte)
 		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
+	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
+
 	if (hugetlb) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
@@ -926,7 +994,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
+		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -934,7 +1003,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
+		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
+					  fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1294,11 +1364,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		hva = vm_end;
 	} while (hva < reg_end);
 
-	if (ret) {
-		spin_lock(&kvm->mmu_lock);
-		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
-		spin_unlock(&kvm->mmu_lock);
-	}
+	spin_lock(&kvm->mmu_lock);
+	if (ret)
+		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+	else
+		stage2_flush_memslot(kvm, memslot);
+	spin_unlock(&kvm->mmu_lock);
 	return ret;
 }
 
@@ -1310,6 +1381,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			    unsigned long npages)
 {
+	/*
+	 * Readonly memslots are not incoherent with the caches by definition,
+	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
+	 * that the guest may consider devices and hence map as uncached.
+	 * To prevent incoherency issues in these cases, tag all readonly
+	 * regions as incoherent.
+	 */
+	if (slot->flags & KVM_MEM_READONLY)
+		slot->flags |= KVM_MEMSLOT_INCOHERENT;
 	return 0;
 }
 
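The KVM_MEM_READONLY flag being checked here is the one userspace sets when registering memory. As a hedged sketch of how such a ROM-like region would be registered (slot number, size, and address are illustrative; real code should also probe KVM_CAP_READONLY_MEM first):

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    /* Sketch: register 64 KiB of host memory as a read-only guest region.
     * After this patch, such a slot is also tagged KVM_MEMSLOT_INCOHERENT
     * internally, so its pages are flushed to PoC when mapped at stage-2. */
    int add_rom_slot(int vm_fd, __u64 guest_phys_addr)
    {
            struct kvm_userspace_memory_region region = {
                    .slot = 1,                 /* illustrative slot number */
                    .flags = KVM_MEM_READONLY,
                    .guest_phys_addr = guest_phys_addr,
                    .memory_size = 0x10000,
            };
            void *backing = mmap(NULL, region.memory_size,
                                 PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (backing == MAP_FAILED)
                    return -1;
            region.userspace_addr = (__u64)(unsigned long)backing;

            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }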
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 09cf37737ee2..58cb3248d277 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/preempt.h>
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
@@ -166,6 +167,23 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 
 static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
 {
+	int i;
+	struct kvm_vcpu *tmp;
+
+	/*
+	 * The KVM ABI specifies that a system event exit may call KVM_RUN
+	 * again and may perform shutdown/reboot at a later time than when the
+	 * actual request is made.  Since we are implementing PSCI and a
+	 * caller of PSCI reboot and shutdown expects that the system shuts
+	 * down or reboots immediately, let's make sure that VCPUs are not run
+	 * after this call is handled and before the VCPUs have been
+	 * re-initialized.
+	 */
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+		tmp->arch.pause = true;
+		kvm_vcpu_kick(tmp);
+	}
+
 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
 	vcpu->run->system_event.type = type;
 	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
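The pause-all behaviour above pairs with the userspace side of the ABI: after a KVM_EXIT_SYSTEM_EVENT the VMM is expected to re-run KVM_ARM_VCPU_INIT on each VCPU before resuming, which (via the arm.c hunks earlier) also unmaps stage-2 and resets HCR. A hedged skeleton of such a run loop; run_mmap is the mmap'ed kvm_run structure, and reset_all_vcpus() is a hypothetical helper standing in for per-VCPU KVM_ARM_VCPU_INIT calls:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    extern void reset_all_vcpus(void);  /* hypothetical: re-inits every VCPU */

    void run_vcpu(int vcpu_fd, struct kvm_run *run_mmap)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                            break;  /* real code would inspect errno (EINTR...) */

                    if (run_mmap->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
                            if (run_mmap->system_event.type ==
                                KVM_SYSTEM_EVENT_RESET) {
                                    reset_all_vcpus();  /* guest asked to reboot */
                                    continue;
                            }
                            break;  /* KVM_SYSTEM_EVENT_SHUTDOWN: stop */
                    }
                    /* ... handle MMIO and other exit reasons ... */
            }
    }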