author		Alexander Graf <agraf@suse.de>	2009-10-30 01:47:14 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-11-05 00:49:55 -0500
commit		c215c6e49fef6c79a5b98f66f11cc6b1e395cb59 (patch)
tree		d995d5f061bf5b25e2833b400eb09979a71818ee /arch/powerpc/kvm
parent		012351808174120b2e1839d48e2f0678808b2367 (diff)
Add book3s_64 specific opcode emulation
There are generic parts of PowerPC that can be shared across all
implementations and specific parts that only apply to BookE or desktop
PPCs. This patch adds emulation for desktop-specific opcodes that don't
apply to BookE CPUs.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
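The new file dispatches on the instruction's primary opcode (get_op()) and,
within opcode groups 19 and 31, on the extended opcode (get_xop()). As a
minimal sketch of that decode, assuming only the helpers from
asm/disassemble.h (the sample encoding below is illustrative):

	/* "mfmsr r3" encodes as 0x7c6000a6: primary opcode 31 (top 6 bits),
	 * extended opcode 83 (bits 21-30), rt = 3 -- so it lands in the
	 * OP_31_XOP_MFMSR case of kvmppc_core_emulate_op() below. */
	u32 inst = 0x7c6000a6;

	switch (get_op(inst)) {			/* inst >> 26 */
	case 31:
		if (get_xop(inst) == 83)	/* (inst >> 1) & 0x3ff */
			/* mfmsr: copy vcpu MSR into gpr[get_rt(inst)] */;
		break;
	}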
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s_64_emulate.c	337

1 file changed, 337 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
new file mode 100644
index 000000000000..c343e67306e0
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -0,0 +1,337 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>

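/* Extended opcode (xop) values within primary opcode groups 19 and 31,
 * matching what get_xop() extracts from the instruction word. */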
#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
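/* Guests that expect dcbz to clear 32 bytes get their dcbz opcodes
 * rewritten to this unassigned encoding, so each such dcbz traps into
 * the OP_31_XOP_DCBZ emulation below. */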
#define OP_31_XOP_DCBZ		1010

int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
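			/* Return from interrupt: PC and MSR come back from
			 * SRR0/SRR1; *advance = 0 keeps us from stepping
			 * past the freshly restored PC. */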
			vcpu->arch.pc = vcpu->arch.srr0;
			kvmppc_set_msr(vcpu, vcpu->arch.srr1);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs = vcpu->arch.gpr[get_rs(inst)];
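			/* Bit 0x10000 is the L field of mtmsrd: with L=1
			 * only the EE and RI bits change, otherwise the
			 * whole MSR is replaced. */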
			if (inst & 0x10000) {
				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
			} else {
				kvmppc_set_msr(vcpu, rs);
			}
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
			break;
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				vcpu->arch.gpr[get_rt(inst)] = sr;
			}
			break;
		}
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
				vcpu->arch.gpr[get_rs(inst)]);
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = vcpu->arch.gpr[get_rb(inst)];
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
		case OP_31_XOP_EIOIO:
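			/* eieio only orders storage accesses; there is
			 * nothing to emulate, so treat it as a no-op. */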
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
					      vcpu->arch.gpr[get_rb(inst)]);
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb;

				rb = vcpu->arch.gpr[get_rb(inst)];
				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
				vcpu->arch.gpr[get_rt(inst)] = t;
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb;

				rb = vcpu->arch.gpr[get_rb(inst)];
				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
				vcpu->arch.gpr[get_rt(inst)] = t;
			}
			break;
		case OP_31_XOP_DCBZ:
		{
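			/* Emulate dcbz with 32-byte cache line semantics:
			 * compute the effective address, align it down to a
			 * 32-byte boundary and store 32 zero bytes. */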
			ulong rb = vcpu->arch.gpr[get_rb(inst)];
			ulong ra = 0;
			ulong addr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

			if (get_ra(inst))
				ra = vcpu->arch.gpr[get_ra(inst)];

			addr = (ra + rb) & ~31ULL;
			if (!(vcpu->arch.msr & MSR_SF))
				addr &= 0xffffffff;

			if (kvmppc_st(vcpu, addr, 32, zeros)) {
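				/* The store failed: report a protection
				 * fault on this EA to the guest and drop any
				 * stale shadow PTEs covering it. */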
				vcpu->arch.dear = addr;
				vcpu->arch.fault_dear = addr;
				to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
						      DSISR_ISSTORE;
				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
				kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}

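/* Even BAT SPR numbers address the upper half of a BAT, odd numbers the
 * lower half; each ibat/dbat array slot covers one upper/lower pair. */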
static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	if (!(sprn % 2)) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
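		/* BL is an 11-bit block-length mask; inverting it and
		 * shifting it into place yields the EA bits that must
		 * match BEPI for the BAT to hit. */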
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
	}
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
		break;
	case SPRN_DSISR:
		to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
		break;
	case SPRN_DAR:
		vcpu->arch.dear = vcpu->arch.gpr[rs];
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		break;
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
		break;
	case SPRN_HID4:
		to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
		/* guest HID5 set can change is_dcbz32 */
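		/* If the guest now uses 32-byte dcbz and we ourselves run
		 * in hypervisor mode, flag the vcpu so dcbz gets the
		 * patch-and-trap treatment set up above. */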
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
		break;
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
		break;
	case SPRN_DAR:
		vcpu->arch.gpr[rt] = vcpu->arch.dear;
		break;
	case SPRN_HIOR:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
		vcpu->arch.gpr[rt] = 0;
		break;
	default:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}