aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kvm/e500_emulate.c
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2012-05-04 08:55:12 -0400
committerAlexander Graf <agraf@suse.de>2012-05-06 10:19:13 -0400
commit54771e6217ce05a474827d9b23ff03de9d2ef2a0 (patch)
tree4555f93d29863b6c0bbd4be61c60bfe7b80ce6c9 /arch/powerpc/kvm/e500_emulate.c
parentc46dc9a86148bc37c31d67a22a3887144ba7aa81 (diff)
KVM: PPC: Emulator: clean up SPR reads and writes
When reading and writing SPRs, every SPR emulation piece had to read or write the respective GPR the value was read from or stored in itself. This approach is pretty prone to failure. What if we accidentally implement mfspr emulation where we just do "break" and nothing else? Suddenly we would get a random value in the return register - which is always a bad idea. So let's consolidate the generic code paths and only give the core specific SPR handling code readily made variables to read/write from/to. Functionally, this patch doesn't change anything, but it increases the readability of the code and makes it less prone to bugs. Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/e500_emulate.c')
-rw-r--r--arch/powerpc/kvm/e500_emulate.c110
1 files changed, 66 insertions, 44 deletions
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 9b2dcda71950..8b99e076dc81 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -140,11 +140,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
140 return emulated; 140 return emulated;
141} 141}
142 142
143int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 143int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
144{ 144{
145 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 145 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
146 int emulated = EMULATE_DONE; 146 int emulated = EMULATE_DONE;
147 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
148 147
149 switch (sprn) { 148 switch (sprn) {
150#ifndef CONFIG_KVM_BOOKE_HV 149#ifndef CONFIG_KVM_BOOKE_HV
@@ -154,25 +153,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
154 case SPRN_PID1: 153 case SPRN_PID1:
155 if (spr_val != 0) 154 if (spr_val != 0)
156 return EMULATE_FAIL; 155 return EMULATE_FAIL;
157 vcpu_e500->pid[1] = spr_val; break; 156 vcpu_e500->pid[1] = spr_val;
157 break;
158 case SPRN_PID2: 158 case SPRN_PID2:
159 if (spr_val != 0) 159 if (spr_val != 0)
160 return EMULATE_FAIL; 160 return EMULATE_FAIL;
161 vcpu_e500->pid[2] = spr_val; break; 161 vcpu_e500->pid[2] = spr_val;
162 break;
162 case SPRN_MAS0: 163 case SPRN_MAS0:
163 vcpu->arch.shared->mas0 = spr_val; break; 164 vcpu->arch.shared->mas0 = spr_val;
165 break;
164 case SPRN_MAS1: 166 case SPRN_MAS1:
165 vcpu->arch.shared->mas1 = spr_val; break; 167 vcpu->arch.shared->mas1 = spr_val;
168 break;
166 case SPRN_MAS2: 169 case SPRN_MAS2:
167 vcpu->arch.shared->mas2 = spr_val; break; 170 vcpu->arch.shared->mas2 = spr_val;
171 break;
168 case SPRN_MAS3: 172 case SPRN_MAS3:
169 vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; 173 vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
170 vcpu->arch.shared->mas7_3 |= spr_val; 174 vcpu->arch.shared->mas7_3 |= spr_val;
171 break; 175 break;
172 case SPRN_MAS4: 176 case SPRN_MAS4:
173 vcpu->arch.shared->mas4 = spr_val; break; 177 vcpu->arch.shared->mas4 = spr_val;
178 break;
174 case SPRN_MAS6: 179 case SPRN_MAS6:
175 vcpu->arch.shared->mas6 = spr_val; break; 180 vcpu->arch.shared->mas6 = spr_val;
181 break;
176 case SPRN_MAS7: 182 case SPRN_MAS7:
177 vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; 183 vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
178 vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; 184 vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
@@ -183,11 +189,14 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
183 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); 189 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
184 break; 190 break;
185 case SPRN_L1CSR1: 191 case SPRN_L1CSR1:
186 vcpu_e500->l1csr1 = spr_val; break; 192 vcpu_e500->l1csr1 = spr_val;
193 break;
187 case SPRN_HID0: 194 case SPRN_HID0:
188 vcpu_e500->hid0 = spr_val; break; 195 vcpu_e500->hid0 = spr_val;
196 break;
189 case SPRN_HID1: 197 case SPRN_HID1:
190 vcpu_e500->hid1 = spr_val; break; 198 vcpu_e500->hid1 = spr_val;
199 break;
191 200
192 case SPRN_MMUCSR0: 201 case SPRN_MMUCSR0:
193 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, 202 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -216,90 +225,103 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
216 break; 225 break;
217#endif 226#endif
218 default: 227 default:
219 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); 228 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
220 } 229 }
221 230
222 return emulated; 231 return emulated;
223} 232}
224 233
225int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) 234int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
226{ 235{
227 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 236 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
228 int emulated = EMULATE_DONE; 237 int emulated = EMULATE_DONE;
229 238
230 switch (sprn) { 239 switch (sprn) {
231#ifndef CONFIG_KVM_BOOKE_HV 240#ifndef CONFIG_KVM_BOOKE_HV
232 unsigned long val;
233
234 case SPRN_PID: 241 case SPRN_PID:
235 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; 242 *spr_val = vcpu_e500->pid[0];
243 break;
236 case SPRN_PID1: 244 case SPRN_PID1:
237 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; 245 *spr_val = vcpu_e500->pid[1];
246 break;
238 case SPRN_PID2: 247 case SPRN_PID2:
239 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; 248 *spr_val = vcpu_e500->pid[2];
249 break;
240 case SPRN_MAS0: 250 case SPRN_MAS0:
241 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; 251 *spr_val = vcpu->arch.shared->mas0;
252 break;
242 case SPRN_MAS1: 253 case SPRN_MAS1:
243 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; 254 *spr_val = vcpu->arch.shared->mas1;
255 break;
244 case SPRN_MAS2: 256 case SPRN_MAS2:
245 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; 257 *spr_val = vcpu->arch.shared->mas2;
258 break;
246 case SPRN_MAS3: 259 case SPRN_MAS3:
247 val = (u32)vcpu->arch.shared->mas7_3; 260 *spr_val = (u32)vcpu->arch.shared->mas7_3;
248 kvmppc_set_gpr(vcpu, rt, val);
249 break; 261 break;
250 case SPRN_MAS4: 262 case SPRN_MAS4:
251 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; 263 *spr_val = vcpu->arch.shared->mas4;
264 break;
252 case SPRN_MAS6: 265 case SPRN_MAS6:
253 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; 266 *spr_val = vcpu->arch.shared->mas6;
267 break;
254 case SPRN_MAS7: 268 case SPRN_MAS7:
255 val = vcpu->arch.shared->mas7_3 >> 32; 269 *spr_val = vcpu->arch.shared->mas7_3 >> 32;
256 kvmppc_set_gpr(vcpu, rt, val);
257 break; 270 break;
258#endif 271#endif
259 case SPRN_TLB0CFG: 272 case SPRN_TLB0CFG:
260 kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break; 273 *spr_val = vcpu->arch.tlbcfg[0];
274 break;
261 case SPRN_TLB1CFG: 275 case SPRN_TLB1CFG:
262 kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break; 276 *spr_val = vcpu->arch.tlbcfg[1];
277 break;
263 case SPRN_L1CSR0: 278 case SPRN_L1CSR0:
264 kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; 279 *spr_val = vcpu_e500->l1csr0;
280 break;
265 case SPRN_L1CSR1: 281 case SPRN_L1CSR1:
266 kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; 282 *spr_val = vcpu_e500->l1csr1;
283 break;
267 case SPRN_HID0: 284 case SPRN_HID0:
268 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; 285 *spr_val = vcpu_e500->hid0;
286 break;
269 case SPRN_HID1: 287 case SPRN_HID1:
270 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; 288 *spr_val = vcpu_e500->hid1;
289 break;
271 case SPRN_SVR: 290 case SPRN_SVR:
272 kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; 291 *spr_val = vcpu_e500->svr;
292 break;
273 293
274 case SPRN_MMUCSR0: 294 case SPRN_MMUCSR0:
275 kvmppc_set_gpr(vcpu, rt, 0); break; 295 *spr_val = 0;
296 break;
276 297
277 case SPRN_MMUCFG: 298 case SPRN_MMUCFG:
278 kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break; 299 *spr_val = vcpu->arch.mmucfg;
300 break;
279 301
280 /* extra exceptions */ 302 /* extra exceptions */
281 case SPRN_IVOR32: 303 case SPRN_IVOR32:
282 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); 304 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
283 break; 305 break;
284 case SPRN_IVOR33: 306 case SPRN_IVOR33:
285 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); 307 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
286 break; 308 break;
287 case SPRN_IVOR34: 309 case SPRN_IVOR34:
288 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); 310 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
289 break; 311 break;
290 case SPRN_IVOR35: 312 case SPRN_IVOR35:
291 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); 313 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
292 break; 314 break;
293#ifdef CONFIG_KVM_BOOKE_HV 315#ifdef CONFIG_KVM_BOOKE_HV
294 case SPRN_IVOR36: 316 case SPRN_IVOR36:
295 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]); 317 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
296 break; 318 break;
297 case SPRN_IVOR37: 319 case SPRN_IVOR37:
298 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]); 320 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
299 break; 321 break;
300#endif 322#endif
301 default: 323 default:
302 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); 324 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
303 } 325 }
304 326
305 return emulated; 327 return emulated;