author     Paolo Bonzini <pbonzini@redhat.com>  2014-08-05 03:47:45 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-08-05 03:47:45 -0400
commit     5d5768660539b6d0da0d46113ffb0676540579a6 (patch)
tree       c0cd7a918fc7371c5f5b1b9b04c6358966850277 /arch/arm/kvm
parent     5167d09ffad5b16b574d35ce3047ed34caf1e837 (diff)
parent     dedf97e8ff2c7513b1370e36b56e08b6bd0f0290 (diff)
Merge tag 'kvm-arm-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm
KVM/ARM New features for 3.17 include:

 - Fixes and code refactoring for stage2 kvm MMU unmap_range
 - Support unmapping IPAs on deleting memslots for arm and arm64
 - Support MMIO mappings in stage2 faults
 - KVM VGIC v2 emulation on GICv3 hardware
 - Big-Endian support for arm/arm64 (guest and host)
 - Debug Architecture support for arm64 (arm32 is on Christoffer's todo list)

Conflicts:
        virt/kvm/arm/vgic.c [last minute cherry-pick from 3.17 to 3.16]
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/Kconfig            |   2
-rw-r--r--  arch/arm/kvm/Makefile           |   1
-rw-r--r--  arch/arm/kvm/arm.c              |  37
-rw-r--r--  arch/arm/kvm/coproc.c           |  88
-rw-r--r--  arch/arm/kvm/guest.c            |  10
-rw-r--r--  arch/arm/kvm/init.S             |   4
-rw-r--r--  arch/arm/kvm/interrupts.S       |   9
-rw-r--r--  arch/arm/kvm/interrupts_head.S  |  48
-rw-r--r--  arch/arm/kvm/mmu.c              | 214
9 files changed, 259 insertions(+), 154 deletions(-)
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 4be5bb150bdd..466bd299b1a8 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,7 +23,7 @@ config KVM
         select HAVE_KVM_CPU_RELAX_INTERCEPT
         select KVM_MMIO
         select KVM_ARM_HOST
-        depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
+        depends on ARM_VIRT_EXT && ARM_LPAE
         ---help---
           Support hosting virtualized guest machines. You will also
           need to select one or more of the processor modules below.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 789bca9e64a7..f7057ed045b6 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,4 +21,5 @@ obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
 obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
 obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 3c82b37c0f9e..d7424ef80354 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
         return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-                           struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-                            unsigned long npages)
-{
-        return 0;
-}
 
 /**
  * kvm_arch_destroy_vm - destroy the VM data structure
@@ -225,33 +215,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
         return -EINVAL;
 }
 
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                   struct kvm_memory_slot *memslot,
-                                   struct kvm_userspace_memory_region *mem,
-                                   enum kvm_mr_change change)
-{
-        return 0;
-}
-
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                   struct kvm_userspace_memory_region *mem,
-                                   const struct kvm_memory_slot *old,
-                                   enum kvm_mr_change change)
-{
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                   struct kvm_memory_slot *slot)
-{
-}
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index c58a35116f63..37a0fe1bb9bb 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -44,6 +44,31 @@ static u32 cache_levels;
 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
 #define CSSELR_MAX 12
 
+/*
+ * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
+ * of cp15 registers can be viewed either as couple of two u32 registers
+ * or one u64 register. Current u64 register encoding is that least
+ * significant u32 word is followed by most significant u32 word.
+ */
+static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
+                                       const struct coproc_reg *r,
+                                       u64 val)
+{
+        vcpu->arch.cp15[r->reg] = val & 0xffffffff;
+        vcpu->arch.cp15[r->reg + 1] = val >> 32;
+}
+
+static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
+                                      const struct coproc_reg *r)
+{
+        u64 val;
+
+        val = vcpu->arch.cp15[r->reg + 1];
+        val = val << 32;
+        val = val | vcpu->arch.cp15[r->reg];
+        return val;
+}
+
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         kvm_inject_undefined(vcpu);
@@ -682,17 +707,23 @@ static struct coproc_reg invariant_cp15[] = {
         { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 };
 
+/*
+ * Reads a register value from a userspace address to a kernel
+ * variable. Make sure that register size matches sizeof(*__val).
+ */
 static int reg_from_user(void *val, const void __user *uaddr, u64 id)
 {
-        /* This Just Works because we are little endian. */
         if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                 return -EFAULT;
         return 0;
 }
 
+/*
+ * Writes a register value to a userspace address from a kernel variable.
+ * Make sure that register size matches sizeof(*__val).
+ */
 static int reg_to_user(void __user *uaddr, const void *val, u64 id)
 {
-        /* This Just Works because we are little endian. */
         if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                 return -EFAULT;
         return 0;
@@ -702,6 +733,7 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
 {
         struct coproc_params params;
         const struct coproc_reg *r;
+        int ret;
 
         if (!index_to_params(id, &params))
                 return -ENOENT;
@@ -710,7 +742,15 @@ static int get_invariant_cp15(u64 id, void __user *uaddr)
         if (!r)
                 return -ENOENT;
 
-        return reg_to_user(uaddr, &r->val, id);
+        ret = -ENOENT;
+        if (KVM_REG_SIZE(id) == 4) {
+                u32 val = r->val;
+
+                ret = reg_to_user(uaddr, &val, id);
+        } else if (KVM_REG_SIZE(id) == 8) {
+                ret = reg_to_user(uaddr, &r->val, id);
+        }
+        return ret;
 }
 
 static int set_invariant_cp15(u64 id, void __user *uaddr)
@@ -718,7 +758,7 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
         struct coproc_params params;
         const struct coproc_reg *r;
         int err;
-        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+        u64 val;
 
         if (!index_to_params(id, &params))
                 return -ENOENT;
@@ -726,7 +766,16 @@ static int set_invariant_cp15(u64 id, void __user *uaddr)
         if (!r)
                 return -ENOENT;
 
-        err = reg_from_user(&val, uaddr, id);
+        err = -ENOENT;
+        if (KVM_REG_SIZE(id) == 4) {
+                u32 val32;
+
+                err = reg_from_user(&val32, uaddr, id);
+                if (!err)
+                        val = val32;
+        } else if (KVM_REG_SIZE(id) == 8) {
+                err = reg_from_user(&val, uaddr, id);
+        }
         if (err)
                 return err;
 
@@ -1004,6 +1053,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
         const struct coproc_reg *r;
         void __user *uaddr = (void __user *)(long)reg->addr;
+        int ret;
 
         if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                 return demux_c15_get(reg->id, uaddr);
@@ -1015,14 +1065,24 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         if (!r)
                 return get_invariant_cp15(reg->id, uaddr);
 
-        /* Note: copies two regs if size is 64 bit. */
-        return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+        ret = -ENOENT;
+        if (KVM_REG_SIZE(reg->id) == 8) {
+                u64 val;
+
+                val = vcpu_cp15_reg64_get(vcpu, r);
+                ret = reg_to_user(uaddr, &val, reg->id);
+        } else if (KVM_REG_SIZE(reg->id) == 4) {
+                ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+        }
+
+        return ret;
 }
 
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
         const struct coproc_reg *r;
         void __user *uaddr = (void __user *)(long)reg->addr;
+        int ret;
 
         if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                 return demux_c15_set(reg->id, uaddr);
@@ -1034,8 +1094,18 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         if (!r)
                 return set_invariant_cp15(reg->id, uaddr);
 
-        /* Note: copies two regs if size is 64 bit */
-        return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+        ret = -ENOENT;
+        if (KVM_REG_SIZE(reg->id) == 8) {
+                u64 val;
+
+                ret = reg_from_user(&val, uaddr, reg->id);
+                if (!ret)
+                        vcpu_cp15_reg64_set(vcpu, r, val);
+        } else if (KVM_REG_SIZE(reg->id) == 4) {
+                ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+        }
+
+        return ret;
 }
 
 static unsigned int num_demux_regs(void)
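
Why the explicit 32/64-bit dispatch above matters: vcpu->arch.cp15 stores a
64-bit cp15 register as two consecutive u32 slots, least significant word
first, so the removed "This Just Works because we are little endian" raw
copies matched the byte layout of a u64 only on little-endian hosts. A
standalone userspace sketch of the difference (illustrative only, not kernel
code):

        #include <stdint.h>
        #include <string.h>
        #include <assert.h>

        int main(void)
        {
                uint32_t cp15[2];
                uint64_t val = 0x1122334455667788ULL, out, raw;

                /* Explicit split/join, as vcpu_cp15_reg64_set/get do:
                 * correct regardless of host endianness. */
                cp15[0] = (uint32_t)val;        /* least significant word */
                cp15[1] = val >> 32;            /* most significant word */
                out = ((uint64_t)cp15[1] << 32) | cp15[0];
                assert(out == val);

                /* Raw byte copy, as the removed code did: equal to val
                 * only on a little-endian host. */
                memcpy(&raw, cp15, sizeof(raw));
                return 0;
        }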
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c1c522..986e625b5dbd 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -124,16 +124,6 @@ static bool is_timer_reg(u64 index)
         return false;
 }
 
-int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
-{
-        return 0;
-}
-
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
-{
-        return 0;
-}
-
 #else
 
 #define NUM_TIMER_REGS 3
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1b9844d369cc..2cc14dfad049 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -71,7 +71,7 @@ __do_hyp_init:
         bne     phase2                  @ Yes, second stage init
 
         @ Set the HTTBR to point to the hypervisor PGD pointer passed
-        mcrr    p15, 4, r2, r3, c2
+        mcrr    p15, 4, rr_lo_hi(r2, r3), c2
 
         @ Set the HTCR and VTCR to the same shareability and cacheability
         @ settings as the non-secure TTBCR and with T0SZ == 0.
@@ -137,7 +137,7 @@ phase2:
         mov     pc, r0
 
 target: @ We're now in the trampoline code, switch page tables
-        mcrr    p15, 4, r2, r3, c2
+        mcrr    p15, 4, rr_lo_hi(r2, r3), c2
         isb
 
         @ Invalidate the old TLBs
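
Both mcrr changes feed a 64-bit HTTBR from a register pair that was loaded
from memory: ldrd fills the first register from the lower address, which on a
big-endian kernel holds the most significant word of the in-memory u64, while
mcrr always expects its operands as (low word, high word). rr_lo_hi() comes
from <asm/assembler.h>; paraphrased from memory (check the header for the
authoritative definition), it is essentially:

        #ifdef CONFIG_CPU_ENDIAN_BE8
        /* Swap the pair so the low word lands where mcrr expects it. */
        #define rr_lo_hi(a1, a2)        a2, a1
        #else
        #define rr_lo_hi(a1, a2)        a1, a2
        #endif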
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 0d68d4073068..01dcb0e752d9 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -52,7 +52,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
         dsb     ishst
         add     r0, r0, #KVM_VTTBR
         ldrd    r2, r3, [r0]
-        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+        mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR
         isb
         mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS (rt ignored)
         dsb     ish
@@ -135,7 +135,7 @@ ENTRY(__kvm_vcpu_run)
         ldr     r1, [vcpu, #VCPU_KVM]
         add     r1, r1, #KVM_VTTBR
         ldrd    r2, r3, [r1]
-        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
+        mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR
 
         @ We're all done, just restore the GPRs and go to the guest
         restore_guest_regs
@@ -199,8 +199,13 @@ after_vfp_restore:
 
         restore_host_regs
         clrex                           @ Clear exclusive monitor
+#ifndef CONFIG_CPU_ENDIAN_BE8
         mov     r0, r1                  @ Return the return code
         mov     r1, #0                  @ Clear upper bits in return value
+#else
+        @ r1 already has return code
+        mov     r0, #0                  @ Clear upper bits in return value
+#endif /* CONFIG_CPU_ENDIAN_BE8 */
         bx      lr                      @ return to IOCTL
 
 /********************************************************************
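
The BE8 block at the end concerns the 64-bit value handed back in the r0/r1
pair: the exit code must end up in the low word with the high word zeroed,
but which register holds the low word flips with endianness (r0 on
little-endian, r1 on BE8, since the first register of a pair carries the most
significant word on big-endian). A small userspace illustration of the same
word-order flip (illustrative only, not kernel code):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* Viewing a u64 as two u32 words: w[0] is the low word on
                 * a little-endian build and the high word on a big-endian
                 * one -- the same flip the #ifdef above handles for the
                 * r0/r1 return pair. */
                union { uint64_t v; uint32_t w[2]; } u = { .v = 1 };

                printf("low word is w[%d]\n", u.w[0] == 1 ? 0 : 1);
                return 0;
        }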
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 76af93025574..98c8c5b9a87f 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -1,4 +1,5 @@
 #include <linux/irqchip/arm-gic.h>
+#include <asm/assembler.h>
 
 #define VCPU_USR_REG(_reg_nr)   (VCPU_USR_REGS + (_reg_nr * 4))
 #define VCPU_USR_SP             (VCPU_USR_REG(13))
@@ -420,15 +421,23 @@ vcpu    .req    r0              @ vcpu pointer always in r0
         ldr     r8, [r2, #GICH_ELRSR0]
         ldr     r9, [r2, #GICH_ELRSR1]
         ldr     r10, [r2, #GICH_APR]
-
-        str     r3, [r11, #VGIC_CPU_HCR]
-        str     r4, [r11, #VGIC_CPU_VMCR]
-        str     r5, [r11, #VGIC_CPU_MISR]
-        str     r6, [r11, #VGIC_CPU_EISR]
-        str     r7, [r11, #(VGIC_CPU_EISR + 4)]
-        str     r8, [r11, #VGIC_CPU_ELRSR]
-        str     r9, [r11, #(VGIC_CPU_ELRSR + 4)]
-        str     r10, [r11, #VGIC_CPU_APR]
+ARM_BE8(rev     r3, r3  )
+ARM_BE8(rev     r4, r4  )
+ARM_BE8(rev     r5, r5  )
+ARM_BE8(rev     r6, r6  )
+ARM_BE8(rev     r7, r7  )
+ARM_BE8(rev     r8, r8  )
+ARM_BE8(rev     r9, r9  )
+ARM_BE8(rev     r10, r10        )
+
+        str     r3, [r11, #VGIC_V2_CPU_HCR]
+        str     r4, [r11, #VGIC_V2_CPU_VMCR]
+        str     r5, [r11, #VGIC_V2_CPU_MISR]
+        str     r6, [r11, #VGIC_V2_CPU_EISR]
+        str     r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
+        str     r8, [r11, #VGIC_V2_CPU_ELRSR]
+        str     r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+        str     r10, [r11, #VGIC_V2_CPU_APR]
 
         /* Clear GICH_HCR */
         mov     r5, #0
@@ -436,9 +445,10 @@ vcpu    .req    r0              @ vcpu pointer always in r0
 
         /* Save list registers */
         add     r2, r2, #GICH_LR0
-        add     r3, r11, #VGIC_CPU_LR
+        add     r3, r11, #VGIC_V2_CPU_LR
         ldr     r4, [r11, #VGIC_CPU_NR_LR]
 1:      ldr     r6, [r2], #4
+ARM_BE8(rev     r6, r6  )
         str     r6, [r3], #4
         subs    r4, r4, #1
         bne     1b
@@ -463,9 +473,12 @@ vcpu    .req    r0              @ vcpu pointer always in r0
         add     r11, vcpu, #VCPU_VGIC_CPU
 
         /* We only restore a minimal set of registers */
-        ldr     r3, [r11, #VGIC_CPU_HCR]
-        ldr     r4, [r11, #VGIC_CPU_VMCR]
-        ldr     r8, [r11, #VGIC_CPU_APR]
+        ldr     r3, [r11, #VGIC_V2_CPU_HCR]
+        ldr     r4, [r11, #VGIC_V2_CPU_VMCR]
+        ldr     r8, [r11, #VGIC_V2_CPU_APR]
+ARM_BE8(rev     r3, r3  )
+ARM_BE8(rev     r4, r4  )
+ARM_BE8(rev     r8, r8  )
 
         str     r3, [r2, #GICH_HCR]
         str     r4, [r2, #GICH_VMCR]
@@ -473,9 +486,10 @@ vcpu    .req    r0              @ vcpu pointer always in r0
 
         /* Restore list registers */
         add     r2, r2, #GICH_LR0
-        add     r3, r11, #VGIC_CPU_LR
+        add     r3, r11, #VGIC_V2_CPU_LR
         ldr     r4, [r11, #VGIC_CPU_NR_LR]
 1:      ldr     r6, [r3], #4
+ARM_BE8(rev     r6, r6  )
         str     r6, [r2], #4
         subs    r4, r4, #1
         bne     1b
@@ -506,7 +520,7 @@ vcpu    .req    r0              @ vcpu pointer always in r0
         mcr     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
         isb
 
-        mrrc    p15, 3, r2, r3, c14     @ CNTV_CVAL
+        mrrc    p15, 3, rr_lo_hi(r2, r3), c14   @ CNTV_CVAL
         ldr     r4, =VCPU_TIMER_CNTV_CVAL
         add     r5, vcpu, r4
         strd    r2, r3, [r5]
@@ -546,12 +560,12 @@ vcpu    .req    r0              @ vcpu pointer always in r0
 
         ldr     r2, [r4, #KVM_TIMER_CNTVOFF]
         ldr     r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
-        mcrr    p15, 4, r2, r3, c14     @ CNTVOFF
+        mcrr    p15, 4, rr_lo_hi(r2, r3), c14   @ CNTVOFF
 
         ldr     r4, =VCPU_TIMER_CNTV_CVAL
         add     r5, vcpu, r4
         ldrd    r2, r3, [r5]
-        mcrr    p15, 3, r2, r3, c14     @ CNTV_CVAL
+        mcrr    p15, 3, rr_lo_hi(r2, r3), c14   @ CNTV_CVAL
         isb
 
         ldr     r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
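
The GIC control interface is little-endian MMIO, so a BE8 kernel sees every
word loaded from or stored to the GICH_* registers byte-reversed; the
ARM_BE8(rev ...) lines swap each value into (or out of) host byte order and
assemble to nothing on little-endian builds. ARM_BE8() lives in
<asm/assembler.h>, which is why the include is added at the top; paraphrased
from memory (check the header for the authoritative definition):

        #ifdef CONFIG_CPU_ENDIAN_BE8
        #define ARM_BE8(code...)        code    /* emitted only on BE8 */
        #else
        #define ARM_BE8(code...)
        #endif

The VGIC_CPU_* to VGIC_V2_CPU_* renames track the split of the VGIC
save/restore state into a v2-specific struct, part of the GICv2-on-GICv3
emulation work named in the merge description.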
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 16f804938b8f..16e7994bf347 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -90,104 +90,115 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
         return p;
 }
 
-static bool page_empty(void *ptr)
+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-        struct page *ptr_page = virt_to_page(ptr);
-        return page_count(ptr_page) == 1;
+        pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
+        pgd_clear(pgd);
+        kvm_tlb_flush_vmid_ipa(kvm, addr);
+        pud_free(NULL, pud_table);
+        put_page(virt_to_page(pgd));
 }
 
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-        if (pud_huge(*pud)) {
-                pud_clear(pud);
-                kvm_tlb_flush_vmid_ipa(kvm, addr);
-        } else {
-                pmd_t *pmd_table = pmd_offset(pud, 0);
-                pud_clear(pud);
-                kvm_tlb_flush_vmid_ipa(kvm, addr);
-                pmd_free(NULL, pmd_table);
-        }
+        pmd_t *pmd_table = pmd_offset(pud, 0);
+        VM_BUG_ON(pud_huge(*pud));
+        pud_clear(pud);
+        kvm_tlb_flush_vmid_ipa(kvm, addr);
+        pmd_free(NULL, pmd_table);
         put_page(virt_to_page(pud));
 }
 
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
-        if (kvm_pmd_huge(*pmd)) {
-                pmd_clear(pmd);
-                kvm_tlb_flush_vmid_ipa(kvm, addr);
-        } else {
-                pte_t *pte_table = pte_offset_kernel(pmd, 0);
-                pmd_clear(pmd);
-                kvm_tlb_flush_vmid_ipa(kvm, addr);
-                pte_free_kernel(NULL, pte_table);
-        }
+        pte_t *pte_table = pte_offset_kernel(pmd, 0);
+        VM_BUG_ON(kvm_pmd_huge(*pmd));
+        pmd_clear(pmd);
+        kvm_tlb_flush_vmid_ipa(kvm, addr);
+        pte_free_kernel(NULL, pte_table);
         put_page(virt_to_page(pmd));
 }
 
-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+                       phys_addr_t addr, phys_addr_t end)
 {
-        if (pte_present(*pte)) {
-                kvm_set_pte(pte, __pte(0));
-                put_page(virt_to_page(pte));
-                kvm_tlb_flush_vmid_ipa(kvm, addr);
-        }
+        phys_addr_t start_addr = addr;
+        pte_t *pte, *start_pte;
+
+        start_pte = pte = pte_offset_kernel(pmd, addr);
+        do {
+                if (!pte_none(*pte)) {
+                        kvm_set_pte(pte, __pte(0));
+                        put_page(virt_to_page(pte));
+                        kvm_tlb_flush_vmid_ipa(kvm, addr);
+                }
+        } while (pte++, addr += PAGE_SIZE, addr != end);
+
+        if (kvm_pte_table_empty(start_pte))
+                clear_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-                        unsigned long long start, u64 size)
+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+                       phys_addr_t addr, phys_addr_t end)
 {
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *pte;
-        unsigned long long addr = start, end = start + size;
-        u64 next;
-
-        while (addr < end) {
-                pgd = pgdp + pgd_index(addr);
-                pud = pud_offset(pgd, addr);
-                pte = NULL;
-                if (pud_none(*pud)) {
-                        addr = kvm_pud_addr_end(addr, end);
-                        continue;
-                }
-
-                if (pud_huge(*pud)) {
-                        /*
-                         * If we are dealing with a huge pud, just clear it and
-                         * move on.
-                         */
-                        clear_pud_entry(kvm, pud, addr);
-                        addr = kvm_pud_addr_end(addr, end);
-                        continue;
-                }
-
-                pmd = pmd_offset(pud, addr);
-                if (pmd_none(*pmd)) {
-                        addr = kvm_pmd_addr_end(addr, end);
-                        continue;
-                }
-
-                if (!kvm_pmd_huge(*pmd)) {
-                        pte = pte_offset_kernel(pmd, addr);
-                        clear_pte_entry(kvm, pte, addr);
-                        next = addr + PAGE_SIZE;
-                }
-
-                /*
-                 * If the pmd entry is to be cleared, walk back up the ladder
-                 */
-                if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
-                        clear_pmd_entry(kvm, pmd, addr);
-                        next = kvm_pmd_addr_end(addr, end);
-                        if (page_empty(pmd) && !page_empty(pud)) {
-                                clear_pud_entry(kvm, pud, addr);
-                                next = kvm_pud_addr_end(addr, end);
-                        }
-                }
-
-                addr = next;
-        }
+        phys_addr_t next, start_addr = addr;
+        pmd_t *pmd, *start_pmd;
+
+        start_pmd = pmd = pmd_offset(pud, addr);
+        do {
+                next = kvm_pmd_addr_end(addr, end);
+                if (!pmd_none(*pmd)) {
+                        if (kvm_pmd_huge(*pmd)) {
+                                pmd_clear(pmd);
+                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+                                put_page(virt_to_page(pmd));
+                        } else {
+                                unmap_ptes(kvm, pmd, addr, next);
+                        }
+                }
+        } while (pmd++, addr = next, addr != end);
+
+        if (kvm_pmd_table_empty(start_pmd))
+                clear_pud_entry(kvm, pud, start_addr);
+}
+
+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+                       phys_addr_t addr, phys_addr_t end)
+{
+        phys_addr_t next, start_addr = addr;
+        pud_t *pud, *start_pud;
+
+        start_pud = pud = pud_offset(pgd, addr);
+        do {
+                next = kvm_pud_addr_end(addr, end);
+                if (!pud_none(*pud)) {
+                        if (pud_huge(*pud)) {
+                                pud_clear(pud);
+                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+                                put_page(virt_to_page(pud));
+                        } else {
+                                unmap_pmds(kvm, pud, addr, next);
+                        }
+                }
+        } while (pud++, addr = next, addr != end);
+
+        if (kvm_pud_table_empty(start_pud))
+                clear_pgd_entry(kvm, pgd, start_addr);
+}
+
+
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+                        phys_addr_t start, u64 size)
+{
+        pgd_t *pgd;
+        phys_addr_t addr = start, end = start + size;
+        phys_addr_t next;
+
+        pgd = pgdp + pgd_index(addr);
+        do {
+                next = kvm_pgd_addr_end(addr, end);
+                unmap_puds(kvm, pgd, addr, next);
+        } while (pgd++, addr = next, addr != end);
 }
 
 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
@@ -748,6 +759,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
         struct vm_area_struct *vma;
         pfn_t pfn;
+        pgprot_t mem_type = PAGE_S2;
 
         write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
         if (fault_status == FSC_PERM && !write_fault) {
@@ -798,6 +810,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (is_error_pfn(pfn))
                 return -EFAULT;
 
+        if (kvm_is_mmio_pfn(pfn))
+                mem_type = PAGE_S2_DEVICE;
+
         spin_lock(&kvm->mmu_lock);
         if (mmu_notifier_retry(kvm, mmu_seq))
                 goto out_unlock;
@@ -805,7 +820,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
         if (hugetlb) {
-                pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+                pmd_t new_pmd = pfn_pmd(pfn, mem_type);
                 new_pmd = pmd_mkhuge(new_pmd);
                 if (writable) {
                         kvm_set_s2pmd_writable(&new_pmd);
@@ -814,13 +829,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
         } else {
-                pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+                pte_t new_pte = pfn_pte(pfn, mem_type);
                 if (writable) {
                         kvm_set_s2pte_writable(&new_pte);
                         kvm_set_pfn_dirty(pfn);
                 }
                 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
-                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
+                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
+                                     mem_type == PAGE_S2_DEVICE);
         }
 
 
@@ -1100,3 +1116,49 @@ out:
         free_hyp_pgds();
         return err;
 }
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                   struct kvm_userspace_memory_region *mem,
+                                   const struct kvm_memory_slot *old,
+                                   enum kvm_mr_change change)
+{
+        gpa_t gpa = old->base_gfn << PAGE_SHIFT;
+        phys_addr_t size = old->npages << PAGE_SHIFT;
+        if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+                spin_lock(&kvm->mmu_lock);
+                unmap_stage2_range(kvm, gpa, size);
+                spin_unlock(&kvm->mmu_lock);
+        }
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                   struct kvm_memory_slot *memslot,
+                                   struct kvm_userspace_memory_region *mem,
+                                   enum kvm_mr_change change)
+{
+        return 0;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+                           struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                            unsigned long npages)
+{
+        return 0;
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                   struct kvm_memory_slot *slot)
+{
+}