author    Paolo Bonzini <pbonzini@redhat.com>  2015-12-04 12:32:32 -0500
committer Paolo Bonzini <pbonzini@redhat.com>  2015-12-04 12:32:32 -0500
commit    09922076003ad66de41ea14d2f8c3b4a16ec7774
tree      169fd359fbde339faad6736363f57566997c99a8
parent    31ade3b83e1821da5fbb2f11b5b3d4ab2ec39db8
parent    0de58f852875a0f0dcfb120bb8433e4e73c7803b

Merge tag 'kvm-arm-for-v4.4-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/ARM fixes for v4.4-rc4:

- A series of fixes to deal with the aliasing between the sp and xzr registers
- A fix for the cache flush fix that went in -rc3
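The core of the sp/xzr series is the new vcpu_get_reg()/vcpu_set_reg() pair (see the arm64 kvm_emulate.h hunk below): a trapped instruction that encodes register 31 names xzr/wzr, but the old pointer-returning vcpu_reg() indexed straight into the saved register file, where the word after x30 is the stack pointer. The stand-alone C sketch below models the bug and the fix; the gp_regs layout and helper names are simplified stand-ins for the kernel's structures, not the kernel code itself:

    #include <stdio.h>

    /* Toy register file: slots 0-30 model x0-x30; in this simplified
     * model slot 31 stands for the sp word that follows x30 in memory. */
    struct gp_regs {
        unsigned long regs[32];
    };

    /* Old behaviour: register 31 silently aliases onto sp. */
    static unsigned long get_reg_old(struct gp_regs *r, unsigned char n)
    {
        return r->regs[n];                  /* n == 31 leaks sp */
    }

    /* New behaviour: register 31 is xzr, so it reads as zero... */
    static unsigned long get_reg_new(struct gp_regs *r, unsigned char n)
    {
        return (n == 31) ? 0 : r->regs[n];
    }

    /* ...and writes to it are discarded instead of corrupting sp. */
    static void set_reg_new(struct gp_regs *r, unsigned char n,
                            unsigned long val)
    {
        if (n != 31)
            r->regs[n] = val;
    }

    int main(void)
    {
        struct gp_regs r = { { 0 } };

        r.regs[31] = 0xffff000012345678UL;                 /* pretend sp */
        printf("old reg31: %#lx\n", get_reg_old(&r, 31));  /* leaks sp */
        printf("new reg31: %#lx\n", get_reg_new(&r, 31));  /* 0, as xzr */
        set_reg_new(&r, 31, 0xdead);                       /* must not touch sp */
        printf("sp intact: %#lx\n", r.regs[31]);
        return 0;
    }

Compile it with any C compiler: the old accessor prints the (fake) sp value, while the new one reads as zero and leaves sp untouched on write.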
 arch/arm/include/asm/kvm_emulate.h   |  12
 arch/arm/kvm/mmio.c                  |   5
 arch/arm/kvm/mmu.c                   |   4
 arch/arm/kvm/psci.c                  |  20
 arch/arm64/include/asm/kvm_emulate.h |  18
 arch/arm64/kvm/handle_exit.c         |   2
 arch/arm64/kvm/sys_regs.c            | 123
 arch/arm64/kvm/sys_regs.h            |   8
 arch/arm64/kvm/sys_regs_generic_v8.c |   4
 9 files changed, 107 insertions(+), 89 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a9c80a2ea1a7..3095df091ff8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..3a10c9f1d0a4 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7dace909d5cf..61d96a645ff3 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
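The two mmu.c hunks above are the follow-up to the cache flush fix from -rc3: at these call sites addr is the guest IPA being walked, so __phys_to_pfn(addr) computed a frame number in the wrong address space; the host pfn has to come from the PTE itself, which is what pte_pfn() returns. A minimal sketch of the distinction, assuming 4K pages and a toy PTE that carries only a physical address (no attribute bits):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    typedef uint64_t pte_t;   /* toy PTE: just a physical address */

    static uint64_t pte_pfn(pte_t pte)       { return pte >> PAGE_SHIFT; }
    static uint64_t phys_to_pfn(uint64_t pa) { return pa >> PAGE_SHIFT; }

    int main(void)
    {
        uint64_t ipa = 0x08000000;   /* guest address being unmapped... */
        pte_t pte = 0x40200000;      /* ...backed by host PA 0x40200000 */

        /* Old check: pfn derived from the guest IPA -- the wrong frame. */
        printf("pfn from IPA: %#llx\n", (unsigned long long)phys_to_pfn(ipa));
        /* Fixed check: pfn taken from the PTE, the real host frame. */
        printf("pfn from PTE: %#llx\n", (unsigned long long)pte_pfn(pte));
        return 0;
    }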
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0b556968a6da..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general purpose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
 
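The psci.c conversions are mechanical because PSCI marshals everything through general purpose registers: function ID in r0/x0, arguments in r1-r3/x1-x3, and the result back in r0/x0. Every *vcpu_reg() read therefore becomes vcpu_get_reg() and the final result store becomes vcpu_set_reg(). A stand-alone sketch of that calling convention as the CPU_ON handler sees it; the vcpu struct and helpers are toy models, and 0xC4000003 is the PSCI v0.2 64-bit CPU_ON function ID per the PSCI spec:

    #include <stdint.h>
    #include <stdio.h>

    #define PSCI_RET_SUCCESS 0

    /* Toy vcpu with four GPRs, which is all PSCI v0.2 CPU_ON needs. */
    struct vcpu { uint64_t regs[4]; };

    static uint64_t vcpu_get_reg(struct vcpu *v, int n)
    {
        return v->regs[n];
    }

    static void vcpu_set_reg(struct vcpu *v, int n, uint64_t val)
    {
        v->regs[n] = val;
    }

    /* CPU_ON as the handler sees it: function ID in reg 0, target MPIDR
     * in reg 1, entry point in reg 2, context id in reg 3, result in reg 0. */
    static void handle_psci_cpu_on(struct vcpu *v)
    {
        uint64_t cpu_id     = vcpu_get_reg(v, 1);
        uint64_t target_pc  = vcpu_get_reg(v, 2);
        uint64_t context_id = vcpu_get_reg(v, 3);

        printf("CPU_ON mpidr=%#llx pc=%#llx ctx=%#llx\n",
               (unsigned long long)cpu_id, (unsigned long long)target_pc,
               (unsigned long long)context_id);
        vcpu_set_reg(v, 0, PSCI_RET_SUCCESS);   /* result goes back in x0 */
    }

    int main(void)
    {
        struct vcpu v = { { 0xC4000003, 0x1, 0x80000, 0x0 } };

        handle_psci_cpu_on(&v);
        printf("x0 = %#llx\n", (unsigned long long)vcpu_get_reg(&v, 0));
        return 0;
    }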
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3ca894ecf699..25a40213bd9b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 }
 
 /*
- * vcpu_reg should always be passed a register number coming from a
- * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
- * with banked registers.
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
  */
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
 /* Get vcpu SPSR for current mode */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 68a0759b1375..15f0477b0d2a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
 
-	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
 			    kvm_vcpu_hvc_get_imm(vcpu));
 
 	ret = kvm_psci_call(vcpu);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87a64e8db04c..d2650e84faf2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
  * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct sys_reg_params *p,
+			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	unsigned long val;
 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 
-	val = *vcpu_reg(vcpu, p->Rt);
 	if (!p->is_aarch32) {
-		vcpu_sys_reg(vcpu, r->reg) = val;
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 	} else {
 		if (!p->is_32bit)
-			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
 	}
 
 	kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
  * for both AArch64 and AArch32 accesses.
  */
 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
-	u64 val;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	val = *vcpu_reg(vcpu, p->Rt);
-	vgic_v3_dispatch_sgi(vcpu, val);
+	vgic_v3_dispatch_sgi(vcpu, p->regval);
 
 	return true;
 }
 
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 }
 
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
 		return ignore_write(vcpu, p);
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
+		p->regval = (1 << 3);
 		return true;
 	}
 }
 
 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
-				   const struct sys_reg_params *p,
+				   struct sys_reg_params *p,
 				   const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 	} else {
 		u32 val;
 		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval = val;
 		return true;
 	}
 }
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  * now use the debug registers.
  */
 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_sys_reg(vcpu, r->reg);
 	}
 
-	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 
 	return true;
 }
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  * hyp.S code switches between host and guest values in future.
  */
 static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *vcpu_reg(vcpu, p->Rt);
+	u64 val = p->regval;
 
 	if (p->is_32bit) {
 		val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
 }
 
 static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *dbg_reg;
-
+	p->regval = *dbg_reg;
 	if (p->is_32bit)
-		val &= 0xffffffffUL;
-
-	*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval &= 0xffffffffUL;
 }
 
 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 };
 
 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
 		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
 			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-			     (6 << 16) | (el3 << 14) | (el3 << 12));
+			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+			     | (6 << 16) | (el3 << 14) | (el3 << 12));
 		return true;
 	}
 }
 
 static bool trap_debug32(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_cp14(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+		p->regval = vcpu_cp14(vcpu, r->reg);
 	}
 
 	return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
  */
 
 static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
 		u64 val = *dbg_reg;
 
 		val &= 0xffffffffUL;
-		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		val |= p->regval << 32;
 		*dbg_reg = val;
 
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+		p->regval = *dbg_reg >> 32;
 	}
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Return 0 if the access has been handled, and -1 if not.
  */
 static int emulate_cp(struct kvm_vcpu *vcpu,
-		      const struct sys_reg_params *params,
+		      struct sys_reg_params *params,
 		      const struct sys_reg_desc *table,
 		      size_t num)
 {
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
 	int Rt2 = (hsr >> 10) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
 	params.is_write = ((hsr & 1) == 0);
 
 	params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	params.CRn = 0;
 
 	/*
-	 * Massive hack here. Store Rt2 in the top 32bits so we only
-	 * have one register to deal with. As we use the same trap
+	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
 	 * backends between AArch32 and AArch64, we get away with it.
 	 */
 	if (params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val &= 0xffffffff;
-		val |= *vcpu_reg(vcpu, Rt2) << 32;
-		*vcpu_reg(vcpu, params.Rt) = val;
+		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
 	}
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	unhandled_cp_access(vcpu, &params);
 
 out:
-	/* Do the opposite hack for the read side */
+	/* Split up the value between registers for the read side */
 	if (!params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val >>= 32;
-		*vcpu_reg(vcpu, Rt2) = val;
+		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
 	}
 
 	return 1;
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = ((hsr & 1) == 0);
 	params.CRn = (hsr >> 10) & 0xf;
 	params.Op0 = 0;
 	params.Op1 = (hsr >> 14) & 0x7;
 	params.Op2 = (hsr >> 17) & 0x7;
 
-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-		return 1;
-	if (!emulate_cp(vcpu, &params, global, nr_global))
+	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+	    !emulate_cp(vcpu, &params, global, nr_global)) {
+		if (!params.is_write)
+			vcpu_set_reg(vcpu, Rt, params.regval);
 		return 1;
+	}
 
 	unhandled_cp_access(vcpu, &params);
 	return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *params)
+			   struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (esr >> 5) & 0x1f;
+	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
 
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.CRn = (esr >> 10) & 0xf;
 	params.CRm = (esr >> 1) & 0xf;
 	params.Op2 = (esr >> 17) & 0x7;
-	params.Rt = (esr >> 5) & 0x1f;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = !(esr & 1);
 
-	return emulate_sys_reg(vcpu, &params);
+	ret = emulate_sys_reg(vcpu, &params);
+
+	if (!params.is_write)
+		vcpu_set_reg(vcpu, Rt, params.regval);
+	return ret;
 }
 
 /******************************************************************************
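All the sys_regs.c hunks follow one pattern: struct sys_reg_params now carries the value (regval) rather than the register index (Rt); the outer handler decodes Rt once, loads regval from the guest register before dispatch, and writes it back after a read, so individual backends only ever touch p->regval, and the write-back naturally goes through the xzr-aware vcpu_set_reg(). The stand-alone sketch below models that marshalling plus the Rt/Rt2 packing used on the 64-bit cp15 path; the types and helpers are toys, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vcpu { uint64_t regs[32]; };
    struct params { uint64_t regval; bool is_write; };

    static uint64_t vcpu_get_reg(struct vcpu *v, int n)
    {
        return (n == 31) ? 0 : v->regs[n];
    }

    static void vcpu_set_reg(struct vcpu *v, int n, uint64_t val)
    {
        if (n != 31)
            v->regs[n] = val;
    }

    /* A backend never sees Rt any more; it only touches p->regval. */
    static bool backend_read(struct params *p)
    {
        p->regval = 0xabcd;
        return true;
    }

    /* The outer handler owns the Rt <-> regval marshalling. */
    static void handle_trap(struct vcpu *v, int Rt)
    {
        struct params p = { .regval = vcpu_get_reg(v, Rt),
                            .is_write = false };

        backend_read(&p);
        if (!p.is_write)
            vcpu_set_reg(v, Rt, p.regval);      /* write back on reads */
    }

    int main(void)
    {
        struct vcpu v = { { 0 } };
        uint64_t regval;

        handle_trap(&v, 5);
        printf("x5 = %#llx\n", (unsigned long long)v.regs[5]);

        /* 64-bit cp15 path: the Rt/Rt2 pair travels as one regval. */
        v.regs[1] = 0x11111111;
        v.regs[2] = 0x22222222;
        regval = (vcpu_get_reg(&v, 1) & 0xffffffff) |
                 (vcpu_get_reg(&v, 2) << 32);
        printf("lo=%#x hi=%#x\n", (unsigned)regval,
               (unsigned)(regval >> 32));
        return 0;
    }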
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index eaa324e4db4d..dbbb01cfbee9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -28,7 +28,7 @@ struct sys_reg_params {
 	u8	CRn;
 	u8	CRm;
 	u8	Op2;
-	u8	Rt;
+	u64	regval;
 	bool	is_write;
 	bool	is_aarch32;
 	bool	is_32bit;	/* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
 
 	/* Trapped access from guest, if non-NULL. */
 	bool (*access)(struct kvm_vcpu *,
-		       const struct sys_reg_params *,
+		       struct sys_reg_params *,
 		       const struct sys_reg_desc *);
 
 	/* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
 }
 
 static inline bool read_zero(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_params *p)
+			     struct sys_reg_params *p)
 {
-	*vcpu_reg(vcpu, p->Rt) = 0;
+	p->regval = 0;
 	return true;
 }
 
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 1e4576824165..ed90578fa120 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -31,13 +31,13 @@
 #include "sys_regs.h"
 
 static bool access_actlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+	p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
 	return true;
 }
43 43