author		Radim Krčmář <rkrcmar@redhat.com>	2016-09-29 10:01:51 -0400
committer	Radim Krčmář <rkrcmar@redhat.com>	2016-09-29 10:01:51 -0400
commit		45ca877ad0519a02c22aaff2e2cdf333a1421a0a (patch)
tree		d7abbc767611379f878bf30b8c3b507d4e31982e /arch/arm/include
parent		c5a6d5f7faad8549bb5ff7e3e5792e33933c5b9f (diff)
parent		0099b7701f5296a758d9e6b945ec96f96847cc2f (diff)
Merge tag 'kvm-arm-for-v4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into next
KVM/ARM Changes for v4.9

- Various cleanups and removal of redundant code
- Two important fixes for not using an in-kernel irqchip
- A few optimizations
- Handle SError exceptions and present them to guests if appropriate
- Proxying of GICV access at EL2 if guest mappings are unsafe
- GICv3 on AArch32 on ARMv8
- Preparations for GICv3 save/restore, including ABI docs
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/arch_gicv3.h	100
-rw-r--r--	arch/arm/include/asm/cp15.h		15
-rw-r--r--	arch/arm/include/asm/cputype.h		1
-rw-r--r--	arch/arm/include/asm/kvm_asm.h		7
-rw-r--r--	arch/arm/include/asm/kvm_emulate.h	35
-rw-r--r--	arch/arm/include/asm/kvm_host.h		5
-rw-r--r--	arch/arm/include/asm/kvm_hyp.h		18
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h		28
-rw-r--r--	arch/arm/include/uapi/asm/kvm.h		7
9 files changed, 153 insertions(+), 63 deletions(-)
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index e08d15184056..1fee657d3827 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
 
 #include <linux/io.h>
 #include <asm/barrier.h>
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm)		p15, Op1, %Q0, %R0, CRm
+#include <asm/cp15.h>
 
 #define ICC_EOIR1			__ACCESS_CP15(c12, 0, c12, 1)
 #define ICC_DIR				__ACCESS_CP15(c12, 0, c11, 1)
@@ -98,65 +96,135 @@
 #define ICH_AP1R2			__AP1Rx(2)
 #define ICH_AP1R3			__AP1Rx(3)
 
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64)			\
+static inline void write_ ## a64(u32 val)	\
+{						\
+	write_sysreg(val, a32);			\
+}						\
+static inline u32 read_ ## a64(void)		\
+{						\
+	return read_sysreg(a32);		\
+}						\
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)	\
+static inline void write_ ## a64(u64 val)	\
+{						\
+	write_sysreg(lower_32_bits(val), a32lo);\
+	write_sysreg(upper_32_bits(val), a32hi);\
+}						\
+static inline u64 read_ ## a64(void)		\
+{						\
+	u64 val = read_sysreg(a32lo);		\
+						\
+	val |= (u64)read_sysreg(a32hi) << 32;	\
+						\
+	return val;				\
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r)		read_##r()
+#define write_gicreg(v, r)	write_##r(v)
+
 /* Low-level accessors */
 
 static inline void gic_write_eoir(u32 irq)
 {
-	asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+	write_sysreg(irq, ICC_EOIR1);
 	isb();
 }
 
 static inline void gic_write_dir(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+	write_sysreg(val, ICC_DIR);
 	isb();
 }
 
 static inline u32 gic_read_iar(void)
 {
-	u32 irqstat;
+	u32 irqstat = read_sysreg(ICC_IAR1);
 
-	asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
 	dsb(sy);
+
 	return irqstat;
 }
 
 static inline void gic_write_pmr(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+	write_sysreg(val, ICC_PMR);
 }
 
 static inline void gic_write_ctlr(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+	write_sysreg(val, ICC_CTLR);
 	isb();
 }
 
 static inline void gic_write_grpen1(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+	write_sysreg(val, ICC_IGRPEN1);
 	isb();
 }
 
 static inline void gic_write_sgi1r(u64 val)
 {
-	asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+	write_sysreg(val, ICC_SGI1R);
 }
 
 static inline u32 gic_read_sre(void)
 {
-	u32 val;
-
-	asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
-	return val;
+	return read_sysreg(ICC_SRE);
 }
 
 static inline void gic_write_sre(u32 val)
 {
-	asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+	write_sysreg(val, ICC_SRE);
 	isb();
 }
 
+static inline void gic_write_bpr1(u32 val)
+{
+#if defined(__write_sysreg) && defined(ICC_BPR1)
+	write_sysreg(val, ICC_BPR1);
+#else
+	asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
+#endif
+}
+
 /*
  * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
  * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
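
For context: each CPUIF_MAP() invocation above generates a pair of accessors
named after the AArch64 register, so VGICv3 code shared with arm64 can use
read_gicreg()/write_gicreg() unchanged. As a hand-expanded sketch (not code
from the tree), CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2) produces roughly:

	static inline void write_ICH_VMCR_EL2(u32 val)
	{
		write_sysreg(val, ICH_VMCR);	/* single 32-bit mcr */
	}
	static inline u32 read_ICH_VMCR_EL2(void)
	{
		return read_sysreg(ICH_VMCR);
	}

	/* common code can then use the arm64 spelling: */
	u32 vmcr = read_gicreg(ICH_VMCR_EL2);	/* -> read_ICH_VMCR_EL2() */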
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index c3f11524f10c..dbdbce1b3a72 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -49,6 +49,21 @@
 
 #ifdef CONFIG_CPU_CP15
 
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
+	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm)		\
+	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({				\
+	t __val;						\
+	asm volatile(r " " c : "=r" (__val));			\
+	__val;							\
+})
+#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
+
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
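
The trick here is that each register macro now expands to a 4-tuple of
(read mnemonic, write mnemonic, stringified encoding, type), which
read_sysreg()/write_sysreg() splice into the right instruction. As a
hand-expanded sketch (assuming ICC_SRE is defined as
__ACCESS_CP15(c12, 0, c12, 5) in arch_gicv3.h), read_sysreg(ICC_SRE)
becomes roughly:

	u32 sre = ({
		u32 __val;
		asm volatile("mrc " "p15, 0, %0, c12, c12, 5"
			     : "=r" (__val));
		__val;
	});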
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 1ee94c716a7f..e2d94c1b07b8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,6 +55,7 @@
 
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
 
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
 	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
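
MPIDR_LEVEL_SHIFT() names the per-level shift that MPIDR_AFFINITY_LEVEL()
already computes inline, so assembly code can share the definition. A
minimal standalone demo of the field extraction (illustrative userspace
code, not from the tree):

	#include <stdio.h>
	#include <stdint.h>

	#define MPIDR_LEVEL_BITS	8
	#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)

	int main(void)
	{
		uint32_t mpidr = 0x00010203;	/* Aff2=1, Aff1=2, Aff0=3 */
		int level;

		for (level = 2; level >= 0; level--)
			printf("Aff%d = %u\n", level,
			       (mpidr >> MPIDR_LEVEL_SHIFT(level)) &
			       MPIDR_LEVEL_MASK);
		return 0;
	}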
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 58faff5f1eb2..d7ea6bcb29bf 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -21,6 +21,10 @@
 
 #include <asm/virt.h>
 
+#define ARM_EXIT_WITH_ABORT_BIT  31
+#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_ABORT_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
+
 #define ARM_EXCEPTION_RESET	  0
 #define ARM_EXCEPTION_UNDEFINED   1
 #define ARM_EXCEPTION_SOFTWARE    2
@@ -68,6 +72,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __init_stage2_translation(void);
 
 extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
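
Bit 31 of the exit code is used as an out-of-band flag: the HYP exit path
can record that an SError (abort) was taken while the guest ran, without
disturbing the exception code itself. A hedged illustration (the composed
value is made up; ARM_EXCEPTION_IRQ is one of the codes defined later in
this header):

	u32 exit_code = ARM_EXCEPTION_IRQ | (1U << ARM_EXIT_WITH_ABORT_BIT);

	/* ARM_EXCEPTION_CODE(exit_code) == ARM_EXCEPTION_IRQ
	 * ARM_ABORT_PENDING(exit_code)  == 1, so the handler can inject
	 * a virtual abort before processing the IRQ exit. */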
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index ee5328fc4b06..9a8a45aaf19a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,29 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 	*vcpu_reg(vcpu, reg_num) = val;
 }
 
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+	return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.hcr;
 }
@@ -61,7 +72,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
 	vcpu->arch.hcr = hcr;
 }
 
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return 1;
 }
@@ -71,9 +82,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +104,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 	return cpsr_mode > USR_MODE;;
 }
 
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.hsr;
 }
 
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	if (hsr & HSR_CV)
+		return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+	return -1;
+}
+
 static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.hxfar;
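
kvm_vcpu_get_condition() extracts the condition field the hardware reports
for a trapped conditional instruction: when HSR.CV is set the COND field is
valid, otherwise -1 tells the caller to fall back to decoding the
instruction state itself. A standalone sketch, assuming the usual HSR
layout of CV at bit 24 and COND at bits [23:20]:

	#define HSR_CV		(1U << 24)
	#define HSR_COND_SHIFT	20
	#define HSR_COND	(0xfU << HSR_COND_SHIFT)

	/* hsr with CV set and COND = 0xe (AL, always executes) */
	u32 hsr = HSR_CV | (0xeU << HSR_COND_SHIFT);
	int cond = (hsr & HSR_CV) ?
		   (int)((hsr & HSR_COND) >> HSR_COND_SHIFT) : -1;
	/* cond == 14 here; -1 would mean "no condition encoded" */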
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 6ad21f04a922..2d19e02d03fd 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,12 @@
 
 #include <kvm/arm_vgic.h>
 
+
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
 
 #define KVM_REQ_VCPU_EXIT	8
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 6eaff28f2ff3..343135ede5fa 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
 
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
+#include <asm/cp15.h>
 #include <asm/kvm_mmu.h>
 #include <asm/vfp.h>
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	\
-	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm)		\
-	"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
 #define __ACCESS_VFP(CRn)			\
 	"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
 
-#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({				\
-	t __val;						\
-	asm volatile(r " " c : "=r" (__val));			\
-	__val;							\
-})
-#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)
-
 #define write_special(v, r)					\
 	asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
 #define read_special(r) ({					\
@@ -119,6 +106,9 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 void __sysreg_save_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
 
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
 void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
 void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
 static inline bool __vfp_enabled(void)
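
The new declarations slot the GICv3 world-switch in next to the existing
GICv2 one. A hedged sketch of the expected call site in the HYP switch code
(the dispatch condition is an assumption here, not verbatim from
hyp/switch.c):

	static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
	{
		if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
			__vgic_v3_save_state(vcpu);
		else
			__vgic_v2_save_state(vcpu);
	}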
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3bb803d6814b..74a44727f8e1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -63,37 +63,13 @@ void kvm_clear_hyp_idmap(void);
 static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
 {
 	*pmd = new_pmd;
-	flush_pmd_entry(pmd);
+	dsb(ishst);
 }
 
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
 	*pte = new_pte;
-	/*
-	 * flush_pmd_entry just takes a void pointer and cleans the necessary
-	 * cache entries, so we can reuse the function for ptes.
-	 */
-	flush_pmd_entry(pte);
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
-	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd(pmd_t *pmd)
-{
-	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
-	clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
-	clean_pte_table(pte);
+	dsb(ishst);
 }
 
 static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a2b3eb313a25..b38c10c73579 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -84,6 +84,13 @@ struct kvm_regs {
 #define KVM_VGIC_V2_DIST_SIZE		0x1000
 #define KVM_VGIC_V2_CPU_SIZE		0x2000
 
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST	2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+
+#define KVM_VGIC_V3_DIST_SIZE		SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
 
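
Userspace consumes these constants through the VGIC device API: after
creating a KVM_DEV_TYPE_ARM_VGIC_V3 device, it places the distributor and
redistributor regions with KVM_SET_DEVICE_ATTR. A hedged userspace sketch
(the base address is a made-up example; alignment and size are checked by
the kernel against KVM_VGIC_V3_DIST_SIZE):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_vgic_v3_dist(int vgic_fd, uint64_t base)
	{
		struct kvm_device_attr attr = {
			.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
			.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
			/* .addr points at a __u64 holding the base */
			.addr  = (uint64_t)(uintptr_t)&base,
		};

		return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
	}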