 arch/powerpc/kvm/44x_emulate.c    | 27
 arch/powerpc/kvm/book3s_emulate.c | 72
 arch/powerpc/kvm/booke_emulate.c  |  7
 arch/powerpc/kvm/e500_emulate.c   | 16
 arch/powerpc/kvm/emulate.c        | 71
 5 files changed, 56 insertions(+), 137 deletions(-)
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 549bb2c9a47a..da81a2d92380 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int dcrn;
-	int ra;
-	int rb;
-	int rc;
-	int rs;
-	int rt;
-	int ws;
+	int dcrn = get_dcrn(inst);
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
+	int rc = get_rc(inst);
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
+	int ws = get_ws(inst);
 
 	switch (get_op(inst)) {
 	case 31:
 		switch (get_xop(inst)) {
 
 		case XOP_MFDCR:
-			dcrn = get_dcrn(inst);
-			rt = get_rt(inst);
-
 			/* The guest may access CPR0 registers to determine the timebase
 			 * frequency, and it must know the real host frequency because it
 			 * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_MTDCR:
-			dcrn = get_dcrn(inst);
-			rs = get_rs(inst);
-
 			/* emulate some access in kernel */
 			switch (dcrn) {
 			case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_TLBWE:
-			ra = get_ra(inst);
-			rs = get_rs(inst);
-			ws = get_ws(inst);
 			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
 			break;
 
 		case XOP_TLBSX:
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-			rc = get_rc(inst);
 			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
 			break;
 
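The hoisting above leans on the usual KVM PPC field-extraction helpers. As a rough sketch (bit positions per the Power ISA fixed 32-bit encoding; the kernel's actual definitions live in arch/powerpc/include/asm/kvm_ppc.h, and the 4xx-specific split fields behind get_dcrn()/get_ws() are not shown):

#include <stdint.h>

/* Sketch only -- not the kernel's verbatim macros.  Every operand field
 * sits at a fixed bit position in the instruction word, so each helper
 * is a pure shift-and-mask with no side effects.  That is what makes it
 * safe to compute them all at the top of kvmppc_core_emulate_op():
 * the values are the same no matter which case ends up running. */
static inline unsigned int get_op(uint32_t inst)  { return inst >> 26; }           /* primary opcode */
static inline unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }  /* X-form extended opcode */
static inline unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }  /* destination GPR */
static inline unsigned int get_rs(uint32_t inst)  { return (inst >> 21) & 0x1f; }  /* source GPR (shares RT's slot) */
static inline unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
static inline unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }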
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 135663a3e4fc..c023bcd253ff 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
+	int rt = get_rt(inst);
+	int rs = get_rs(inst);
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
 
 	switch (get_op(inst)) {
 	case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, get_rt(inst),
-				       vcpu->arch.shared->msr);
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
-			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
 			if (inst & 0x10000) {
-				vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-				vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+				ulong new_msr = vcpu->arch.shared->msr;
+				new_msr &= ~(MSR_RI | MSR_EE);
+				new_msr |= rs_val & (MSR_RI | MSR_EE);
+				vcpu->arch.shared->msr = new_msr;
 			} else
-				kvmppc_set_msr(vcpu, rs);
+				kvmppc_set_msr(vcpu, rs_val);
 			break;
 		}
 		case OP_31_XOP_MTMSR:
-			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_MFSR:
 		{
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
 			}
 			break;
 		}
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		{
 			int srnum;
 
-			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
 			}
 			break;
 		}
 		case OP_31_XOP_MTSR:
 			vcpu->arch.mmu.mtsrin(vcpu,
 				(inst >> 16) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_MTSRIN:
 			vcpu->arch.mmu.mtsrin(vcpu,
-				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+				kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_TLBIE:
 		case OP_31_XOP_TLBIEL:
 		{
 			bool large = (inst & 0x00200000) ? true : false;
-			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+			ulong addr = kvmppc_get_gpr(vcpu, rb);
 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
 			break;
 		}
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				return EMULATE_FAIL;
 
 			vcpu->arch.mmu.slbmte(vcpu,
-					kvmppc_get_gpr(vcpu, get_rs(inst)),
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rs),
+					kvmppc_get_gpr(vcpu, rb));
 			break;
 		case OP_31_XOP_SLBIE:
 			if (!vcpu->arch.mmu.slbie)
 				return EMULATE_FAIL;
 
 			vcpu->arch.mmu.slbie(vcpu,
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rb));
 			break;
 		case OP_31_XOP_SLBIA:
 			if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (!vcpu->arch.mmu.slbmfee) {
 				emulated = EMULATE_FAIL;
 			} else {
-				ulong t, rb;
+				ulong t, rb_val;
 
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
 			}
 			break;
 		case OP_31_XOP_SLBMFEV:
 			if (!vcpu->arch.mmu.slbmfev) {
 				emulated = EMULATE_FAIL;
 			} else {
-				ulong t, rb;
+				ulong t, rb_val;
 
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
 			}
 			break;
 		case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		case OP_31_XOP_DCBZ:
 		{
-			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-			ulong ra = 0;
+			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+			ulong ra_val = 0;
 			ulong addr, vaddr;
 			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 			u32 dsisr;
 			int r;
 
-			if (get_ra(inst))
-				ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+			if (ra)
+				ra_val = kvmppc_get_gpr(vcpu, ra);
 
-			addr = (ra + rb) & ~31ULL;
+			addr = (ra_val + rb_val) & ~31ULL;
 			if (!(vcpu->arch.shared->msr & MSR_SF))
 				addr &= 0xffffffff;
 			vaddr = addr;
@@ -565,23 +570,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
 {
 	ulong dar = 0;
-	ulong ra;
+	ulong ra = get_ra(inst);
+	ulong rb = get_rb(inst);
 
 	switch (get_op(inst)) {
 	case OP_LFS:
 	case OP_LFD:
 	case OP_STFD:
 	case OP_STFS:
-		ra = get_ra(inst);
 		if (ra)
 			dar = kvmppc_get_gpr(vcpu, ra);
 		dar += (s32)((s16)inst);
 		break;
 	case 31:
-		ra = get_ra(inst);
 		if (ra)
 			dar = kvmppc_get_gpr(vcpu, ra);
-		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+		dar += kvmppc_get_gpr(vcpu, rb);
 		break;
 	default:
 		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
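The renames in this file (rs to rs_val, rb to rb_val, ra to ra_val) are not cosmetic: with the register indices now declared at function scope, the old block-local variables holding register values would shadow them. A minimal illustration of the trap, with a stand-in for kvmppc_get_gpr():

#include <stdio.h>
typedef unsigned long ulong;

static ulong gpr[32] = { [3] = 0x1234 };
static ulong get_gpr_stub(int reg) { return gpr[reg]; }	/* stand-in for kvmppc_get_gpr() */

int main(void)
{
	unsigned int inst = 0x7c600124;		/* mtmsr r3: RS field = 3 */
	int rs = (inst >> 21) & 0x1f;		/* register NUMBER, hoisted to function scope */

	{
		/* Keeping the old name would shadow the index above -- and since
		 * a C declaration is already in scope within its own initializer,
		 * writing `ulong rs = get_gpr_stub(rs);` here would pass the
		 * brand-new, uninitialized rs to the lookup.  A distinct name
		 * avoids both problems. */
		ulong rs_val = get_gpr_stub(rs);
		printf("r%d = 0x%lx\n", rs, rs_val);	/* prints r3 = 0x1234 */
	}
	return 0;
}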
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 904412bbea40..e14f7b23fd3a 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int rs;
-	int rt;
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
 
 	switch (get_op(inst)) {
 	case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		switch (get_xop(inst)) {
 
 		case OP_31_XOP_MFMSR:
-			rt = get_rt(inst);
 			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
 			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
 			break;
 
 		case OP_31_XOP_MTMSR:
-			rs = get_rs(inst);
 			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
 			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 			break;
 
 		case OP_31_XOP_WRTEE:
-			rs = get_rs(inst);
 			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
 					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
 			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
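For reference, the WRTEE case above is the standard clear-then-merge single-bit update: every MSR bit except EE keeps its old value, and EE alone is taken from the guest's source register. As a standalone sketch (MSR_EE is the external-interrupt enable bit, 0x8000 in the PPC MSR layout):

typedef unsigned long ulong;

#define MSR_EE	0x8000UL	/* external-interrupt enable */

/* Same shape as the emulation above: clear the target bit, then OR in
 * the corresponding bit from the source value. */
static ulong wrtee_merge(ulong old_msr, ulong src)
{
	return (old_msr & ~MSR_EE) | (src & MSR_EE);
}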
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 99155f847a6a..9b2dcda71950 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -86,9 +86,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int ra;
-	int rb;
-	int rt;
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
+	int rt = get_rt(inst);
 
 	switch (get_op(inst)) {
 	case 31:
@@ -96,11 +96,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 #ifdef CONFIG_KVM_E500MC
 		case XOP_MSGSND:
-			emulated = kvmppc_e500_emul_msgsnd(vcpu, get_rb(inst));
+			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
 			break;
 
 		case XOP_MSGCLR:
-			emulated = kvmppc_e500_emul_msgclr(vcpu, get_rb(inst));
+			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
 			break;
 #endif
 
@@ -113,20 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_TLBSX:
-			rb = get_rb(inst);
 			emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
 			break;
 
 		case XOP_TLBILX:
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-			rt = get_rt(inst);
 			emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
 			break;
 
 		case XOP_TLBIVAX:
-			ra = get_ra(inst);
-			rb = get_rb(inst);
 			emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
 			break;
 
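One possible objection to this style is that every case now pays for decoding fields it never reads (MSGSND, for instance, only needs rb). In practice the cost is nil: the helpers are side-effect-free shift-and-mask expressions, so unused results are dead code the compiler drops. A sketch of the idea:

static inline unsigned int get_ra(unsigned int inst) { return (inst >> 16) & 0x1f; }
static inline unsigned int get_rb(unsigned int inst) { return (inst >> 11) & 0x1f; }

unsigned int msgsnd_operand(unsigned int inst)
{
	unsigned int ra = get_ra(inst);	/* never used below: eliminated at -O2 */
	unsigned int rb = get_rb(inst);	/* the only operand msgsnd consumes */

	(void)ra;
	return rb;
}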
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index a27d4dc3b4a3..f63b5cbd8221 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -148,11 +148,10 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	u32 inst = kvmppc_get_last_inst(vcpu);
-	int ra;
-	int rb;
-	int rs;
-	int rt;
-	int sprn;
+	int ra = get_ra(inst);
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
+	int sprn = get_sprn(inst);
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
 
@@ -189,43 +188,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			advance = 0;
 			break;
 		case OP_31_XOP_LWZX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 			break;
 
 		case OP_31_XOP_LBZX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 			break;
 
 		case OP_31_XOP_LBZUX:
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
 		case OP_31_XOP_STWX:
-			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       4, 1);
 			break;
 
 		case OP_31_XOP_STBX:
-			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       1, 1);
 			break;
 
 		case OP_31_XOP_STBUX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       1, 1);
@@ -233,28 +220,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_LHAX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 			break;
 
 		case OP_31_XOP_LHZX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 			break;
 
 		case OP_31_XOP_LHZUX:
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
 		case OP_31_XOP_MFSPR:
-			sprn = get_sprn(inst);
-			rt = get_rt(inst);
-
 			switch (sprn) {
 			case SPRN_SRR0:
 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
@@ -310,20 +288,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_STHX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       2, 1);
 			break;
 
 		case OP_31_XOP_STHUX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       2, 1);
@@ -331,8 +301,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_MTSPR:
-			sprn = get_sprn(inst);
-			rs = get_rs(inst);
 			switch (sprn) {
 			case SPRN_SRR0:
 				vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
@@ -384,7 +352,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_LWBRX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
 			break;
 
@@ -392,25 +359,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_STWBRX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       4, 0);
 			break;
 
 		case OP_31_XOP_LHBRX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
 			break;
 
 		case OP_31_XOP_STHBRX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
 						       kvmppc_get_gpr(vcpu, rs),
 						       2, 0);
@@ -423,39 +381,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_LWZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
 	case OP_LWZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LBZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		break;
 
 	case OP_LBZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STW:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
 	case OP_STWU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
@@ -463,15 +412,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_STB:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
 	case OP_STBU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
@@ -479,39 +425,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_LHZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_LHZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LHA:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_LHAU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STH:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
 	case OP_STHU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
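Finally, note why the update-form cases in emulate.c (LWZU, LBZUX, STHUX, and friends) still use ra after the access: those instructions write the effective address back into RA, which is what the kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed) calls above do. A rough sketch of that shape, with hypothetical stand-ins for the KVM MMIO helpers:

typedef unsigned long ulong;

struct vcpu_stub {
	ulong gpr[32];
	ulong vaddr_accessed;	/* guest effective address of the last access */
};

/* hypothetical stand-in for kvmppc_handle_load(): kicks off the MMIO
 * read that will land in gpr[rt] and records the effective address */
static int handle_load_stub(struct vcpu_stub *v, int rt, int bytes)
{
	(void)v; (void)rt; (void)bytes;
	return 0;	/* EMULATE_DONE in the real code */
}

/* lwzu rt, d(ra): load word, then update RA with the effective address */
static int emulate_lwzu_stub(struct vcpu_stub *v, int rt, int ra)
{
	int r = handle_load_stub(v, rt, 4);

	v->gpr[ra] = v->vaddr_accessed;	/* the "U" (update) in LWZU */
	return r;
}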