about summary refs log tree commit diff stats
path: root/arch/powerpc/kvm/powerpc.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 58
1 file changed, 35 insertions, 23 deletions
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae97686e9f4..3cf541a53e2a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
68 */ 68 */
69int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) 69int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
70{ 70{
71 int r = 1; 71 int r;
72
73 WARN_ON(irqs_disabled());
74 hard_irq_disable();
72 75
73 WARN_ON_ONCE(!irqs_disabled());
74 while (true) { 76 while (true) {
75 if (need_resched()) { 77 if (need_resched()) {
76 local_irq_enable(); 78 local_irq_enable();
77 cond_resched(); 79 cond_resched();
78 local_irq_disable(); 80 hard_irq_disable();
79 continue; 81 continue;
80 } 82 }
81 83
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
101 local_irq_enable(); 103 local_irq_enable();
102 trace_kvm_check_requests(vcpu); 104 trace_kvm_check_requests(vcpu);
103 r = kvmppc_core_check_requests(vcpu); 105 r = kvmppc_core_check_requests(vcpu);
104 local_irq_disable(); 106 hard_irq_disable();
105 if (r > 0) 107 if (r > 0)
106 continue; 108 continue;
107 break; 109 break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
113 continue; 115 continue;
114 } 116 }
115 117
116#ifdef CONFIG_PPC64
117 /* lazy EE magic */
118 hard_irq_disable();
119 if (lazy_irq_pending()) {
120 /* Got an interrupt in between, try again */
121 local_irq_enable();
122 local_irq_disable();
123 kvm_guest_exit();
124 continue;
125 }
126#endif
127
128 kvm_guest_enter(); 118 kvm_guest_enter();
129 break; 119 return 1;
130 } 120 }
131 121
122 /* return to host */
123 local_irq_enable();
132 return r; 124 return r;
133} 125}
134EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); 126EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
@@ -656,14 +648,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
656 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 648 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
657 break; 649 break;
658 case KVM_MMIO_REG_FPR: 650 case KVM_MMIO_REG_FPR:
659 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 651 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
660 break; 652 break;
661#ifdef CONFIG_PPC_BOOK3S 653#ifdef CONFIG_PPC_BOOK3S
662 case KVM_MMIO_REG_QPR: 654 case KVM_MMIO_REG_QPR:
663 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 655 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
664 break; 656 break;
665 case KVM_MMIO_REG_FQPR: 657 case KVM_MMIO_REG_FQPR:
666 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 658 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
667 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 659 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
668 break; 660 break;
669#endif 661#endif
@@ -673,9 +665,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
673} 665}
674 666
675int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 667int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
676 unsigned int rt, unsigned int bytes, int is_bigendian) 668 unsigned int rt, unsigned int bytes,
669 int is_default_endian)
677{ 670{
678 int idx, ret; 671 int idx, ret;
672 int is_bigendian;
673
674 if (kvmppc_need_byteswap(vcpu)) {
675 /* Default endianness is "little endian". */
676 is_bigendian = !is_default_endian;
677 } else {
678 /* Default endianness is "big endian". */
679 is_bigendian = is_default_endian;
680 }
679 681
680 if (bytes > sizeof(run->mmio.data)) { 682 if (bytes > sizeof(run->mmio.data)) {
681 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 683 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -711,21 +713,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load);
711 713
712/* Same as above, but sign extends */ 714/* Same as above, but sign extends */
713int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 715int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
714 unsigned int rt, unsigned int bytes, int is_bigendian) 716 unsigned int rt, unsigned int bytes,
717 int is_default_endian)
715{ 718{
716 int r; 719 int r;
717 720
718 vcpu->arch.mmio_sign_extend = 1; 721 vcpu->arch.mmio_sign_extend = 1;
719 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); 722 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
720 723
721 return r; 724 return r;
722} 725}
723 726
724int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 727int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
725 u64 val, unsigned int bytes, int is_bigendian) 728 u64 val, unsigned int bytes, int is_default_endian)
726{ 729{
727 void *data = run->mmio.data; 730 void *data = run->mmio.data;
728 int idx, ret; 731 int idx, ret;
732 int is_bigendian;
733
734 if (kvmppc_need_byteswap(vcpu)) {
735 /* Default endianness is "little endian". */
736 is_bigendian = !is_default_endian;
737 } else {
738 /* Default endianness is "big endian". */
739 is_bigendian = is_default_endian;
740 }
729 741
730 if (bytes > sizeof(run->mmio.data)) { 742 if (bytes > sizeof(run->mmio.data)) {
731 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, 743 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,