Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
 arch/powerpc/kvm/emulate.c | 118 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 67 insertions(+), 51 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4a9ac6640fa..cb72a65f4ec 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 
 	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
 #ifdef CONFIG_PPC64
+	/* mtdec lowers the interrupt line when positive. */
+	kvmppc_core_dequeue_dec(vcpu);
+
 	/* POWER4+ triggers a dec interrupt if the value is < 0 */
 	if (vcpu->arch.dec & 0x80000000) {
 		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
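A note on the sign test above: DEC is a 32-bit register, so testing bit 31 asks whether the value is negative when viewed as signed. A minimal illustration (hypothetical helper, not part of the patch):

/* Hypothetical helper: a 32-bit DEC value is "negative" iff bit 31 is set. */
static inline bool dec_is_negative(u32 dec)
{
	return (dec & 0x80000000) != 0;	/* same test as the hunk above */
}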
@@ -140,14 +143,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
+	/* Try again next time */
+	if (inst == KVM_INST_FETCH_FAILED)
+		return EMULATE_DONE;
+
 	switch (get_op(inst)) {
 	case OP_TRAP:
 #ifdef CONFIG_PPC64
 	case OP_TRAP_64:
+		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
-		vcpu->arch.esr |= ESR_PTR;
+		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
 #endif
-		kvmppc_core_queue_program(vcpu);
 		advance = 0;
 		break;
 
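The trap case now passes the program-check reason to kvmppc_core_queue_program as an argument (SRR1_PROGTRAP on 64-bit, the ESR_PTR bit on Book E) instead of poking vcpu->arch.esr and queueing separately. A sketch of what the Book E flavor of the callee plausibly looks like; the queued_esr field name is an assumption, not taken from this diff:

/* Sketch, assuming a per-vcpu field that latches the reason bits until
 * the interrupt is actually delivered to the guest. */
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;	/* assumed field */
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}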
@@ -167,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case OP_31_XOP_STWX:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
 	case OP_31_XOP_STBX:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
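The mechanical change running through the rest of this diff is vcpu->arch.gpr[n] becoming kvmppc_get_gpr()/kvmppc_set_gpr() calls. A minimal sketch of the accessors, assuming a plain array-backed definition (behavior is unchanged; the backing storage just becomes an implementation detail):

/* Sketch, assuming the straightforward array-backed definitions. */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}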
@@ -183,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rb = get_rb(inst);
 
-		ea = vcpu->arch.gpr[rb];
+		ea = kvmppc_get_gpr(vcpu, rb);
 		if (ra)
-			ea += vcpu->arch.gpr[ra];
+			ea += kvmppc_get_gpr(vcpu, ra);
 
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
-		vcpu->arch.gpr[rs] = ea;
+		kvmppc_set_gpr(vcpu, rs, ea);
 		break;
 
 	case OP_31_XOP_LHZX:
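The if (ra) guard in the indexed forms implements the PowerPC (ra|0) convention: a base-register field of 0 means literal zero, not GPR0. The recurring pattern could be factored as below; kvmppc_indexed_ea is a hypothetical name used only for illustration:

/* EA = (ra ? GPR[ra] : 0) + GPR[rb], per the (ra|0) addressing rule. */
static ulong kvmppc_indexed_ea(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea = kvmppc_get_gpr(vcpu, rb);

	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);
	return ea;
}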
@@ -203,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rb = get_rb(inst);
 
-		ea = vcpu->arch.gpr[rb];
+		ea = kvmppc_get_gpr(vcpu, rb);
 		if (ra)
-			ea += vcpu->arch.gpr[ra];
+			ea += kvmppc_get_gpr(vcpu, ra);
 
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		vcpu->arch.gpr[ra] = ea;
+		kvmppc_set_gpr(vcpu, ra, ea);
 		break;
 
 	case OP_31_XOP_MFSPR:
@@ -217,47 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 		switch (sprn) {
 		case SPRN_SRR0:
-			vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
 		case SPRN_SRR1:
-			vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
 		case SPRN_PVR:
-			vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
 		case SPRN_PIR:
-			vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
 		case SPRN_MSSSR0:
-			vcpu->arch.gpr[rt] = 0; break;
+			kvmppc_set_gpr(vcpu, rt, 0); break;
 
 		/* Note: mftb and TBRL/TBWL are user-accessible, so
 		 * the guest can always access the real TB anyways.
 		 * In fact, we probably will never see these traps. */
 		case SPRN_TBWL:
-			vcpu->arch.gpr[rt] = get_tb() >> 32; break;
+			kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
 		case SPRN_TBWU:
-			vcpu->arch.gpr[rt] = get_tb(); break;
+			kvmppc_set_gpr(vcpu, rt, get_tb()); break;
 
 		case SPRN_SPRG0:
-			vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
 		case SPRN_SPRG1:
-			vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
 		case SPRN_SPRG2:
-			vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
 		case SPRN_SPRG3:
-			vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
 		/* Note: SPRG4-7 are user-readable, so we don't get
 		 * a trap. */
 
 		case SPRN_DEC:
 		{
 			u64 jd = get_tb() - vcpu->arch.dec_jiffies;
-			vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
-			pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
+			pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
+				 vcpu->arch.dec, jd,
+				 kvmppc_get_gpr(vcpu, rt));
 			break;
 		}
 		default:
 			emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
 			if (emulated == EMULATE_FAIL) {
 				printk("mfspr: unknown spr %x\n", sprn);
-				vcpu->arch.gpr[rt] = 0;
+				kvmppc_set_gpr(vcpu, rt, 0);
 			}
 			break;
 		}
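The SPRN_DEC read works backwards from the timebase: jd is the number of ticks elapsed since the decrementer was loaded (dec_jiffies evidently holds a timebase snapshot, despite the name), so the guest sees the loaded value minus that delta. A worked sketch with invented numbers, wrapped in a hypothetical function:

/* Hypothetical wrapper around the logic above, with invented values:
 * mtDEC stored dec = 5000 while get_tb() read 1000 (saved as
 * dec_jiffies); if get_tb() now returns 1800, then
 *   jd  = 1800 - 1000 = 800 elapsed ticks
 *   DEC = 5000 - 800  = 4200 as observed by the guest. */
static void kvmppc_read_dec_example(struct kvm_vcpu *vcpu, int rt)
{
	u64 jd = get_tb() - vcpu->arch.dec_jiffies;

	kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
}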
@@ -269,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		rb = get_rb(inst);
 
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
@@ -278,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rb = get_rb(inst);
 
-		ea = vcpu->arch.gpr[rb];
+		ea = kvmppc_get_gpr(vcpu, rb);
 		if (ra)
-			ea += vcpu->arch.gpr[ra];
+			ea += kvmppc_get_gpr(vcpu, ra);
 
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
-		vcpu->arch.gpr[ra] = ea;
+		kvmppc_set_gpr(vcpu, ra, ea);
 		break;
 
 	case OP_31_XOP_MTSPR:
@@ -293,9 +302,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		rs = get_rs(inst);
 		switch (sprn) {
 		case SPRN_SRR0:
-			vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+			vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
 		case SPRN_SRR1:
-			vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+			vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
 
 		/* XXX We need to context-switch the timebase for
 		 * watchdog and FIT. */
@@ -305,18 +314,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		case SPRN_MSSSR0: break;
 
 		case SPRN_DEC:
-			vcpu->arch.dec = vcpu->arch.gpr[rs];
+			vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
 			kvmppc_emulate_dec(vcpu);
 			break;
 
 		case SPRN_SPRG0:
-			vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+			vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
 		case SPRN_SPRG1:
-			vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+			vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
 		case SPRN_SPRG2:
-			vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+			vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
 		case SPRN_SPRG3:
-			vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+			vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
 
 		default:
 			emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
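SPRs not handled in the generic switch fall through to per-core hooks. A hedged sketch of the shape such a hook takes; the SPR and field chosen are illustrative, not from this diff:

/* Sketch of a per-core fallback; EMULATE_FAIL makes the generic code
 * print the "unknown spr" warning seen above. */
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	ulong val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_DBCR0:		/* illustrative example */
		vcpu->arch.dbcr0 = val;
		return EMULATE_DONE;
	default:
		return EMULATE_FAIL;
	}
}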
@@ -348,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		rb = get_rb(inst);
 
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 0);
 		break;
 
@@ -363,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		rb = get_rb(inst);
 
 		emulated = kvmppc_handle_store(run, vcpu,
-					       vcpu->arch.gpr[rs],
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 0);
 		break;
 
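The two hunks above differ from the earlier stores only in the final kvmppc_handle_store argument: 0 rather than 1. These appear to be the byte-reversed forms (stwbrx/sthbrx), with the flag selecting byte order; the prototype below, including the parameter name, is an assumption consistent with that reading:

/* Assumed prototype: is_bigendian = 1 for normal stores, 0 for the
 * byte-reversed stwbrx/sthbrx encodings. */
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u32 val, unsigned int bytes, int is_bigendian);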
@@ -382,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_LBZ:
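The update-form loads (lwzu and friends) write an address back into ra after the access; here that is vcpu->arch.paddr_accessed, the address recorded by the MMIO path. A hypothetical refactor of the repeated epilogue, for illustration only:

/* Hypothetical refactor of the repeated update-form epilogue; bytes is
 * 1, 2, or 4 depending on the opcode. */
static int kvmppc_emulate_load_u(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 int rt, int ra, unsigned int bytes)
{
	int emulated = kvmppc_handle_load(run, vcpu, rt, bytes, 1);

	kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
	return emulated;
}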
@@ -394,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STW:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
 	case OP_STWU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STB:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
 	case OP_STBU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_LHZ:
@@ -434,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	case OP_STH:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
 	case OP_STHU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
 		break;
 
 	default:
@@ -461,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		advance = 0;
 		printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
 		       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+		kvmppc_core_queue_program(vcpu, 0);
 	}
 }
 
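The default arm now injects a program interrupt (with zero reason flags) for instructions it cannot emulate, instead of only logging. Since advance = 0, the guest PC presumably stays on the faulting instruction; a sketch of the assumed epilogue, which is not shown in this diff:

/* Assumed epilogue: only step the guest PC past instructions that were
 * actually emulated, so an injected fault sees the faulting address. */
if (advance)
	vcpu->arch.pc += 4;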