Diffstat (limited to 'arch/powerpc/kvm/booke.c')
-rw-r--r--  arch/powerpc/kvm/booke.c | 471
1 file changed, 392 insertions(+), 79 deletions(-)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ee9e1ee9c858..72f13f4a06e0 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -17,6 +17,8 @@
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ *          Scott Wood <scottwood@freescale.com>
+ *          Varun Sethi <varun.sethi@freescale.com>
  */
 
 #include <linux/errno.h>
@@ -30,9 +32,12 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
-#include "timing.h"
 #include <asm/cacheflush.h>
+#include <asm/dbell.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
 
+#include "timing.h"
 #include "booke.h"
 
 unsigned long kvmppc_booke_handlers;
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
55 { "dec", VCPU_STAT(dec_exits) }, 60 { "dec", VCPU_STAT(dec_exits) },
56 { "ext_intr", VCPU_STAT(ext_intr_exits) }, 61 { "ext_intr", VCPU_STAT(ext_intr_exits) },
57 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 62 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63 { "doorbell", VCPU_STAT(dbell_exits) },
64 { "guest doorbell", VCPU_STAT(gdbell_exits) },
58 { NULL } 65 { NULL }
59}; 66};
60 67
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
 	u32 old_msr = vcpu->arch.shared->msr;
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	new_msr |= MSR_GS;
+#endif
+
 	vcpu->arch.shared->msr = new_msr;
 
 	kvmppc_mmu_msr_notify(vcpu, old_msr);
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
 }
 
+static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GSRR0, srr0);
+	mtspr(SPRN_GSRR1, srr1);
+#else
+	vcpu->arch.shared->srr0 = srr0;
+	vcpu->arch.shared->srr1 = srr1;
+#endif
+}
+
+static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+	vcpu->arch.csrr0 = srr0;
+	vcpu->arch.csrr1 = srr1;
+}
+
+static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
+		vcpu->arch.dsrr0 = srr0;
+		vcpu->arch.dsrr1 = srr1;
+	} else {
+		set_guest_csrr(vcpu, srr0, srr1);
+	}
+}
+
+static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+	vcpu->arch.mcsrr0 = srr0;
+	vcpu->arch.mcsrr1 = srr1;
+}
+
+static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GDEAR);
+#else
+	return vcpu->arch.shared->dar;
+#endif
+}
+
+static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GDEAR, dear);
+#else
+	vcpu->arch.shared->dar = dear;
+#endif
+}
+
+static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GESR);
+#else
+	return vcpu->arch.shared->esr;
+#endif
+}
+
+static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GESR, esr);
+#else
+	vcpu->arch.shared->esr = esr;
+#endif
+}
+
 /* Deliver the interrupt of the corresponding priority, if possible. */
 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
 {
 	int allowed = 0;
-	ulong uninitialized_var(msr_mask);
+	ulong msr_mask = 0;
 	bool update_esr = false, update_dear = false;
 	ulong crit_raw = vcpu->arch.shared->critical;
 	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
 	bool crit;
 	bool keep_irq = false;
+	enum int_class int_class;
 
 	/* Truncate crit indicators in 32 bit mode */
 	if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	case BOOKE_IRQPRIO_AP_UNAVAIL:
 	case BOOKE_IRQPRIO_ALIGNMENT:
 		allowed = 1;
-		msr_mask = MSR_CE|MSR_ME|MSR_DE;
+		msr_mask = MSR_CE | MSR_ME | MSR_DE;
+		int_class = INT_CLASS_NONCRIT;
 		break;
 	case BOOKE_IRQPRIO_CRITICAL:
-	case BOOKE_IRQPRIO_WATCHDOG:
+	case BOOKE_IRQPRIO_DBELL_CRIT:
 		allowed = vcpu->arch.shared->msr & MSR_CE;
+		allowed = allowed && !crit;
 		msr_mask = MSR_ME;
+		int_class = INT_CLASS_CRIT;
 		break;
 	case BOOKE_IRQPRIO_MACHINE_CHECK:
 		allowed = vcpu->arch.shared->msr & MSR_ME;
-		msr_mask = 0;
+		allowed = allowed && !crit;
+		int_class = INT_CLASS_MC;
 		break;
 	case BOOKE_IRQPRIO_DECREMENTER:
 	case BOOKE_IRQPRIO_FIT:
 		keep_irq = true;
 		/* fall through */
 	case BOOKE_IRQPRIO_EXTERNAL:
+	case BOOKE_IRQPRIO_DBELL:
 		allowed = vcpu->arch.shared->msr & MSR_EE;
 		allowed = allowed && !crit;
-		msr_mask = MSR_CE|MSR_ME|MSR_DE;
+		msr_mask = MSR_CE | MSR_ME | MSR_DE;
+		int_class = INT_CLASS_NONCRIT;
 		break;
 	case BOOKE_IRQPRIO_DEBUG:
 		allowed = vcpu->arch.shared->msr & MSR_DE;
+		allowed = allowed && !crit;
 		msr_mask = MSR_ME;
+		int_class = INT_CLASS_CRIT;
 		break;
 	}
 
 	if (allowed) {
-		vcpu->arch.shared->srr0 = vcpu->arch.pc;
-		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
+		switch (int_class) {
+		case INT_CLASS_NONCRIT:
+			set_guest_srr(vcpu, vcpu->arch.pc,
+				      vcpu->arch.shared->msr);
+			break;
+		case INT_CLASS_CRIT:
+			set_guest_csrr(vcpu, vcpu->arch.pc,
+				       vcpu->arch.shared->msr);
+			break;
+		case INT_CLASS_DBG:
+			set_guest_dsrr(vcpu, vcpu->arch.pc,
+				       vcpu->arch.shared->msr);
+			break;
+		case INT_CLASS_MC:
+			set_guest_mcsrr(vcpu, vcpu->arch.pc,
+					vcpu->arch.shared->msr);
+			break;
+		}
+
 		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
 		if (update_esr == true)
-			vcpu->arch.shared->esr = vcpu->arch.queued_esr;
+			set_guest_esr(vcpu, vcpu->arch.queued_esr);
 		if (update_dear == true)
-			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
+			set_guest_dear(vcpu, vcpu->arch.queued_dear);
 		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
 		if (!keep_irq)
 			clear_bit(priority, &vcpu->arch.pending_exceptions);
 	}
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	/*
+	 * If an interrupt is pending but masked, raise a guest doorbell
+	 * so that we are notified when the guest enables the relevant
+	 * MSR bit.
+	 */
+	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
+		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
+	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
+		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
+	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
+		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
+#endif
+
 	return allowed;
 }
 
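Aside (not part of the patch): the msr_mask handling above is easiest to see in isolation. On delivery, kvmppc_set_msr() receives the old MSR with every bit outside the mask cleared, so a noncritical interrupt clears MSR_EE while CE, ME and DE survive until the guest's return from interrupt. A minimal standalone sketch; the bit positions are the Book E ones, and the main() driver is purely illustrative:

#include <stdio.h>

/* Book E MSR bits, by position (as in asm/reg.h / asm/reg_booke.h) */
#define MSR_CE (1u << 17)	/* critical interrupts enabled */
#define MSR_EE (1u << 15)	/* external interrupts enabled */
#define MSR_ME (1u << 12)	/* machine checks enabled */
#define MSR_DE (1u << 9)	/* debug interrupts enabled */

int main(void)
{
	unsigned int msr = MSR_EE | MSR_CE | MSR_ME | MSR_DE;
	unsigned int noncrit_mask = MSR_CE | MSR_ME | MSR_DE;
	unsigned int crit_mask = MSR_ME;

	/* what kvmppc_set_msr() sees for a noncritical delivery:
	 * EE is cleared, CE/ME/DE stay set */
	printf("noncrit: %#x -> %#x\n", msr, msr & noncrit_mask);

	/* a critical delivery additionally clears CE and DE */
	printf("crit:    %#x -> %#x\n", msr, msr & crit_mask);
	return 0;
}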
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 	}
 
 	priority = __ffs(*pending);
-	while (priority <= BOOKE_IRQPRIO_MAX) {
+	while (priority < BOOKE_IRQPRIO_MAX) {
 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
 			break;
 
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
 }
 
 /* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
+	int r = 0;
 	WARN_ON_ONCE(!irqs_disabled());
 
 	kvmppc_core_check_exceptions(vcpu);
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		local_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-		kvmppc_core_check_exceptions(vcpu);
+		r = 1;
 	};
 
+	return r;
+}
+
+/*
+ * Common checks before entering the guest world. Call with interrupts
+ * disabled.
+ *
+ * returns !0 if a signal is pending
+ */
+static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+	int r = 0;
+
+	WARN_ON_ONCE(!irqs_disabled());
+	while (true) {
+		if (need_resched()) {
+			local_irq_enable();
+			cond_resched();
+			local_irq_disable();
+			continue;
+		}
+
+		if (signal_pending(current)) {
+			r = 1;
+			break;
+		}
+
+		if (kvmppc_core_prepare_to_enter(vcpu)) {
+			/* interrupts got enabled in between, so we
+			 * are back at square 1 */
+			continue;
+		}
+
+		break;
+	}
+
+	return r;
 }
 
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
+#ifdef CONFIG_PPC_FPU
+	unsigned int fpscr;
+	int fpexc_mode;
+	u64 fpr[32];
+#endif
 
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
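Aside (not part of the patch): kvmppc_prepare_to_enter() above is a retry loop with three outcomes per pass: reschedule and retry, give up because a signal is pending, or retry because the core check re-enabled interrupts in between. A standalone model of that control flow; the *_stub predicates are hypothetical stand-ins for need_resched(), signal_pending() and kvmppc_core_prepare_to_enter():

#include <stdio.h>
#include <stdbool.h>

static int resched_left = 1;	/* pretend we need one resched */
static int recheck_left = 1;	/* pretend irqs got enabled once */

static bool need_resched_stub(void)   { return resched_left-- > 0; }
static bool signal_pending_stub(void) { return false; }
static bool core_check_stub(void)     { return recheck_left-- > 0; }

/* mirrors the shape of kvmppc_prepare_to_enter() */
static int prepare_to_enter(void)
{
	while (true) {
		if (need_resched_stub())
			continue;	/* reschedule, then retry */

		if (signal_pending_stub())
			return 1;	/* caller exits to userspace */

		if (core_check_stub())
			continue;	/* back at square 1, retry */

		return 0;	/* safe to enter the guest */
	}
}

int main(void)
{
	printf("prepare_to_enter() = %d\n", prepare_to_enter());
	return 0;
}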
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 
 	local_irq_disable();
-
-	kvmppc_core_prepare_to_enter(vcpu);
-
-	if (signal_pending(current)) {
+	if (kvmppc_prepare_to_enter(vcpu)) {
 		kvm_run->exit_reason = KVM_EXIT_INTR;
 		ret = -EINTR;
 		goto out;
 	}
 
 	kvm_guest_enter();
+
+#ifdef CONFIG_PPC_FPU
+	/* Save userspace FPU state in stack */
+	enable_kernel_fp();
+	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	fpscr = current->thread.fpscr.val;
+	fpexc_mode = current->thread.fpexc_mode;
+
+	/* Restore guest FPU state to thread */
+	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
+	current->thread.fpscr.val = vcpu->arch.fpscr;
+
+	/*
+	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
+	 * as always using the FPU. Kernel usage of FP (via
+	 * enable_kernel_fp()) in this thread must not occur while
+	 * vcpu->fpu_active is set.
+	 */
+	vcpu->fpu_active = 1;
+
+	kvmppc_load_guest_fp(vcpu);
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+#ifdef CONFIG_PPC_FPU
+	kvmppc_save_guest_fp(vcpu);
+
+	vcpu->fpu_active = 0;
+
+	/* Save guest FPU state from thread */
+	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
+	vcpu->arch.fpscr = current->thread.fpscr.val;
+
+	/* Restore userspace FPU state from stack */
+	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+	current->thread.fpscr.val = fpscr;
+	current->thread.fpexc_mode = fpexc_mode;
+#endif
+
 	kvm_guest_exit();
 
 out:
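Aside (not part of the patch): stripped of the kernel details, the FPU handling above is a plain save/swap/run/swap-back of register state around __kvmppc_vcpu_run(). A standalone sketch; struct fp_state and run_guest() are stand-ins for current->thread, vcpu->arch and the guest run itself:

#include <stdio.h>
#include <string.h>

/* stand-ins for the FP state in current->thread and vcpu->arch */
struct fp_state {
	double fpr[32];
	unsigned int fpscr;
};

static struct fp_state thread_fp;	/* host task's FP state */
static struct fp_state guest_fp;	/* vcpu's FP state */

static void run_guest(void)
{
	thread_fp.fpr[0] += 1.0;	/* the guest clobbers live registers */
}

int main(void)
{
	struct fp_state saved;	/* "the stack" in the patch */

	memcpy(&saved, &thread_fp, sizeof(saved));	  /* save userspace state */
	memcpy(&thread_fp, &guest_fp, sizeof(thread_fp)); /* install guest state */

	run_guest();

	memcpy(&guest_fp, &thread_fp, sizeof(guest_fp)); /* save guest state */
	memcpy(&thread_fp, &saved, sizeof(thread_fp));	 /* restore userspace */

	printf("guest fpr0 = %g, host fpr0 = %g\n",
	       guest_fp.fpr[0], thread_fp.fpr[0]);
	return 0;
}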
@@ -363,6 +564,84 @@ out:
 	return ret;
 }
 
+static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er;
+
+	er = kvmppc_emulate_instruction(run, vcpu);
+	switch (er) {
+	case EMULATE_DONE:
+		/* don't overwrite subtypes, just account kvm_stats */
+		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
+		/* Future optimization: only reload non-volatiles if
+		 * they were actually modified by emulation. */
+		return RESUME_GUEST_NV;
+
+	case EMULATE_DO_DCR:
+		run->exit_reason = KVM_EXIT_DCR;
+		return RESUME_HOST;
+
+	case EMULATE_FAIL:
+		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+		/* For debugging, encode the failing instruction and
+		 * report it to userspace. */
+		run->hw.hardware_exit_reason = ~0ULL << 32;
+		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+		kvmppc_core_queue_program(vcpu, ESR_PIL);
+		return RESUME_HOST;
+
+	default:
+		BUG();
+	}
+}
+
+static void kvmppc_fill_pt_regs(struct pt_regs *regs)
+{
+	ulong r1, ip, msr, lr;
+
+	asm("mr %0, 1" : "=r"(r1));
+	asm("mflr %0" : "=r"(lr));
+	asm("mfmsr %0" : "=r"(msr));
+	asm("bl 1f; 1: mflr %0" : "=r"(ip));
+
+	memset(regs, 0, sizeof(*regs));
+	regs->gpr[1] = r1;
+	regs->nip = ip;
+	regs->msr = msr;
+	regs->link = lr;
+}
+
+static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
+				     unsigned int exit_nr)
+{
+	struct pt_regs regs;
+
+	switch (exit_nr) {
+	case BOOKE_INTERRUPT_EXTERNAL:
+		kvmppc_fill_pt_regs(&regs);
+		do_IRQ(&regs);
+		break;
+	case BOOKE_INTERRUPT_DECREMENTER:
+		kvmppc_fill_pt_regs(&regs);
+		timer_interrupt(&regs);
+		break;
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
+	case BOOKE_INTERRUPT_DOORBELL:
+		kvmppc_fill_pt_regs(&regs);
+		doorbell_exception(&regs);
+		break;
+#endif
+	case BOOKE_INTERRUPT_MACHINE_CHECK:
+		/* FIXME */
+		break;
+	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+		kvmppc_fill_pt_regs(&regs);
+		performance_monitor_exception(&regs);
+		break;
+	}
+}
+
 /**
  * kvmppc_handle_exit
  *
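Aside (not part of the patch): the "bl 1f; 1: mflr %0" pair in kvmppc_fill_pt_regs() is the usual PowerPC idiom for reading the current instruction address: branch to the very next instruction so the link register receives its address, then copy LR out. The GNU C labels-as-values extension expresses the same "hand me a code address" idea portably, as in this sketch:

#include <stdio.h>

int main(void)
{
	/* GNU C labels-as-values: take the address of a code location,
	 * much as "bl 1f; 1: mflr %0" captures the current NIP. */
	void *ip = &&here;
here:
	printf("code address: %p\n", ip);
	return 0;
}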
@@ -371,12 +650,14 @@ out:
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
-	enum emulation_result er;
 	int r = RESUME_HOST;
 
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
 
+	/* restart interrupts if they were meant for the host */
+	kvmppc_restart_interrupt(vcpu, exit_nr);
+
 	local_irq_enable();
 
 	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOKE_INTERRUPT_MACHINE_CHECK:
 		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
 		kvmppc_dump_vcpu(vcpu);
+		/* For debugging, send invalid exit reason to user space */
+		run->hw.hardware_exit_reason = ~1ULL << 32;
+		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
 		r = RESUME_HOST;
 		break;
 
 	case BOOKE_INTERRUPT_EXTERNAL:
 		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
-		if (need_resched())
-			cond_resched();
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_DECREMENTER:
-		/* Since we switched IVPR back to the host's value, the host
-		 * handled this interrupt the moment we enabled interrupts.
-		 * Now we just offer it a chance to reschedule the guest. */
 		kvmppc_account_exit(vcpu, DEC_EXITS);
-		if (need_resched())
-			cond_resched();
 		r = RESUME_GUEST;
 		break;
 
+	case BOOKE_INTERRUPT_DOORBELL:
+		kvmppc_account_exit(vcpu, DBELL_EXITS);
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
+		kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+		/*
+		 * We are here because there is a pending guest interrupt
+		 * which could not be delivered as MSR_CE or MSR_ME was not
+		 * set. Once we break from here we will retry delivery.
+		 */
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_GUEST_DBELL:
+		kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+		/*
+		 * We are here because there is a pending guest interrupt
+		 * which could not be delivered as MSR_EE was not set. Once
+		 * we break from here we will retry delivery.
+		 */
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+		r = RESUME_GUEST;
+		break;
+
+	case BOOKE_INTERRUPT_HV_PRIV:
+		r = emulation_exit(run, vcpu);
+		break;
+
 	case BOOKE_INTERRUPT_PROGRAM:
-		if (vcpu->arch.shared->msr & MSR_PR) {
-			/* Program traps generated by user-level software must be handled
-			 * by the guest kernel. */
+		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
+			/*
+			 * Program traps generated by user-level software must
+			 * be handled by the guest kernel.
+			 *
+			 * In GS mode, hypervisor privileged instructions trap
+			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
+			 * actual program interrupts, handled by the guest.
+			 */
 			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
 			r = RESUME_GUEST;
 			kvmppc_account_exit(vcpu, USR_PR_INST);
 			break;
 		}
 
-		er = kvmppc_emulate_instruction(run, vcpu);
-		switch (er) {
-		case EMULATE_DONE:
-			/* don't overwrite subtypes, just account kvm_stats */
-			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
-			/* Future optimization: only reload non-volatiles if
-			 * they were actually modified by emulation. */
-			r = RESUME_GUEST_NV;
-			break;
-		case EMULATE_DO_DCR:
-			run->exit_reason = KVM_EXIT_DCR;
-			r = RESUME_HOST;
-			break;
-		case EMULATE_FAIL:
-			/* XXX Deliver Program interrupt to guest. */
-			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
-			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-			/* For debugging, encode the failing instruction and
-			 * report it to userspace. */
-			run->hw.hardware_exit_reason = ~0ULL << 32;
-			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
-			r = RESUME_HOST;
-			break;
-		default:
-			BUG();
-		}
+		r = emulation_exit(run, vcpu);
 		break;
 
 	case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	case BOOKE_INTERRUPT_HV_SYSCALL:
+		if (!(vcpu->arch.shared->msr & MSR_PR)) {
+			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+		} else {
+			/*
+			 * hcall from guest userspace -- send privileged
+			 * instruction program check.
+			 */
+			kvmppc_core_queue_program(vcpu, ESR_PPR);
+		}
+
+		r = RESUME_GUEST;
+		break;
+#else
 	case BOOKE_INTERRUPT_SYSCALL:
 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
 		r = RESUME_GUEST;
 		break;
+#endif
 
 	case BOOKE_INTERRUPT_DTLB_MISS: {
 		unsigned long eaddr = vcpu->arch.fault_dear;
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gpa_t gpaddr;
 		gfn_t gfn;
 
-#ifdef CONFIG_KVM_E500
+#ifdef CONFIG_KVM_E500V2
 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
 		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
 			kvmppc_map_magic(vcpu);
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Guest has mapped and accessed a page which is not
 		 * actually RAM. */
 		vcpu->arch.paddr_accessed = gpaddr;
+		vcpu->arch.vaddr_accessed = eaddr;
 		r = kvmppc_emulate_mmio(run, vcpu);
 		kvmppc_account_exit(vcpu, MMIO_EXITS);
 	}
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		BUG();
 	}
 
-	local_irq_disable();
-
-	kvmppc_core_prepare_to_enter(vcpu);
-
+	/*
+	 * To avoid clobbering exit_reason, only check for signals if we
+	 * aren't already exiting to userspace for some other reason.
+	 */
 	if (!(r & RESUME_HOST)) {
-		/* To avoid clobbering exit_reason, only check for signals if
-		 * we aren't already exiting to userspace for some other
-		 * reason. */
-		if (signal_pending(current)) {
+		local_irq_disable();
+		if (kvmppc_prepare_to_enter(vcpu)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	int r;
 
 	vcpu->arch.pc = 0;
-	vcpu->arch.shared->msr = 0;
-	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	vcpu->arch.shared->pir = vcpu->vcpu_id;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+	kvmppc_set_msr(vcpu, 0);
 
+#ifndef CONFIG_KVM_BOOKE_HV
+	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	vcpu->arch.shadow_pid = 1;
+	vcpu->arch.shared->msr = 0;
+#endif
 
 	/* Eye-catching numbers so we know if the guest takes an interrupt
 	 * before it's programmed its own IVPR/IVORs. */
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
 	sregs->u.e.mcsr = vcpu->arch.mcsr;
-	sregs->u.e.esr = vcpu->arch.shared->esr;
-	sregs->u.e.dear = vcpu->arch.shared->dar;
+	sregs->u.e.esr = get_guest_esr(vcpu);
+	sregs->u.e.dear = get_guest_dear(vcpu);
 	sregs->u.e.tsr = vcpu->arch.tsr;
 	sregs->u.e.tcr = vcpu->arch.tcr;
 	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
 	vcpu->arch.mcsr = sregs->u.e.mcsr;
-	vcpu->arch.shared->esr = sregs->u.e.esr;
-	vcpu->arch.shared->dar = sregs->u.e.dear;
+	set_guest_esr(vcpu, sregs->u.e.esr);
+	set_guest_dear(vcpu, sregs->u.e.dear);
 	vcpu->arch.vrsave = sregs->u.e.vrsave;
 	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
 
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 {
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
-{
-	return 0;
-}
-
-void kvmppc_core_destroy_vm(struct kvm *kvm)
-{
-}
-
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
 {
 	vcpu->arch.tcr = new_tcr;
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data)
 	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
 }
 
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	current->thread.kvm_vcpu = vcpu;
+}
+
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	current->thread.kvm_vcpu = NULL;
+}
+
 int __init kvmppc_booke_init(void)
 {
+#ifndef CONFIG_KVM_BOOKE_HV
 	unsigned long ivor[16];
 	unsigned long max_ivor = 0;
 	int i;
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void)
 	}
 	flush_icache_range(kvmppc_booke_handlers,
 	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
-
+#endif /* !BOOKE_HV */
 	return 0;
 }
 