author     Hollis Blanchard <hollisb@us.ibm.com>  2008-11-05 10:36:16 -0500
committer  Avi Kivity <avi@redhat.com>            2008-12-31 09:52:21 -0500
commit     75f74f0dbe086c239b4b0cc5ed75b903ea3e663f (patch)
tree       c6774128934667d1c82a6e458d9a4233574a95a4 /arch/powerpc
parent     c381a04313e7c0fb04246b1ff711e0b5726de6c0 (diff)
KVM: ppc: refactor instruction emulation into generic and core-specific pieces
Cores provide 3 emulation hooks, implemented for example in the new
44x_emulate.c:
    kvmppc_core_emulate_op
    kvmppc_core_emulate_mtspr
    kvmppc_core_emulate_mfspr

Strictly speaking the last two aren't necessary, but they provide for more
informative error reporting ("unknown SPR").

Long term I'd like to have instruction decoding autogenerated from tables of
opcodes, and that way we could aggregate universal, Book E, and core-specific
instructions more easily and without redundant switch statements.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
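[Editorial sketch, C, illustrative only — a simplification of the patch below, not quoted verbatim from it.] The dispatch flow this refactoring sets up: the generic emulator in emulate.c decodes universal and Book E instructions, and anything it cannot handle is offered to the core-specific hook before being reported as unemulatable.

	int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
	{
		u32 inst = vcpu->arch.last_inst;
		enum emulation_result emulated = EMULATE_DONE;
		int advance = 1;

		switch (get_op(inst)) {
		/* ... universal and Book E instructions decoded generically ... */
		default:
			emulated = EMULATE_FAIL;
		}

		if (emulated == EMULATE_FAIL) {
			/* Core-specific pass, e.g. kvmppc_core_emulate_op() from 44x_emulate.c. */
			emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
			if (emulated == EMULATE_FAIL)
				advance = 0; /* don't step past an instruction we couldn't emulate */
		}

		return emulated;
	}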
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h |  29
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c     | 335
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c         |   4
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h         |   4
-rw-r--r--  arch/powerpc/kvm/Makefile          |   7
-rw-r--r--  arch/powerpc/kvm/booke.c           |   1
-rw-r--r--  arch/powerpc/kvm/booke.h           |  39
-rw-r--r--  arch/powerpc/kvm/emulate.c         | 272
8 files changed, 415 insertions, 276 deletions
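[Editorial illustration, hypothetical stub — not part of this patch.] The kvm_ppc.h changes below spell out the contract a core backend must satisfy. A core with no extra instructions or SPRs could satisfy it by deferring everything back to the generic code, which then prints the "unknown spr" / "Couldn't emulate" diagnostics:

	int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
	                           unsigned int inst, int *advance)
	{
		return EMULATE_FAIL; /* nothing core-specific to emulate */
	}

	int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
	{
		return EMULATE_FAIL; /* generic code reports "mtspr: unknown spr" */
	}

	int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
	{
		return EMULATE_FAIL; /* generic code reports "mfspr: unknown spr" */
	}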
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 96d5de90ac5a..aecf95d5fede 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -53,35 +53,13 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
-extern int kvmppc_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc);
+extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
                            u64 asid, u32 flags);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
 
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
-		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
-	vcpu->arch.msr = new_msr;
-
-	if (vcpu->arch.msr & MSR_WE)
-		kvm_vcpu_block(vcpu);
-}
-
-static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
-{
-	if (vcpu->arch.pid != new_pid) {
-		vcpu->arch.pid = new_pid;
-		vcpu->arch.swap_pid = 1;
-	}
-}
-
 /* Core-specific hooks */
 
 extern int kvmppc_core_check_processor_compat(void);
@@ -99,6 +77,11 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 
+extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                                  unsigned int op, int *advance);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+
 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
new file mode 100644
index 000000000000..a634c0c4fa7e
--- /dev/null
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -0,0 +1,335 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/kvm_ppc.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/disassemble.h>
+
+#include "booke.h"
+#include "44x_tlb.h"
+
+#define OP_RFI      19
+
+#define XOP_RFI     50
+#define XOP_MFMSR   83
+#define XOP_WRTEE   131
+#define XOP_MTMSR   146
+#define XOP_WRTEEI  163
+#define XOP_MFDCR   323
+#define XOP_MTDCR   451
+#define XOP_TLBSX   914
+#define XOP_ICCCI   966
+#define XOP_TLBWE   978
+
+static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+	if (vcpu->arch.pid != new_pid) {
+		vcpu->arch.pid = new_pid;
+		vcpu->arch.swap_pid = 1;
+	}
+}
+
+static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pc = vcpu->arch.srr0;
+	kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+}
+
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                           unsigned int inst, int *advance)
+{
+	int emulated = EMULATE_DONE;
+	int dcrn;
+	int ra;
+	int rb;
+	int rc;
+	int rs;
+	int rt;
+	int ws;
+
+	switch (get_op(inst)) {
+
+	case OP_RFI:
+		switch (get_xop(inst)) {
+		case XOP_RFI:
+			kvmppc_emul_rfi(vcpu);
+			*advance = 0;
+			break;
+
+		default:
+			emulated = EMULATE_FAIL;
+			break;
+		}
+		break;
+
+	case 31:
+		switch (get_xop(inst)) {
+
+		case XOP_MFMSR:
+			rt = get_rt(inst);
+			vcpu->arch.gpr[rt] = vcpu->arch.msr;
+			break;
+
+		case XOP_MTMSR:
+			rs = get_rs(inst);
+			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+			break;
+
+		case XOP_WRTEE:
+			rs = get_rs(inst);
+			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+					| (vcpu->arch.gpr[rs] & MSR_EE);
+			break;
+
+		case XOP_WRTEEI:
+			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+					| (inst & MSR_EE);
+			break;
+
+		case XOP_MFDCR:
+			dcrn = get_dcrn(inst);
+			rt = get_rt(inst);
+
+			/* The guest may access CPR0 registers to determine the timebase
+			 * frequency, and it must know the real host frequency because it
+			 * can directly access the timebase registers.
+			 *
+			 * It would be possible to emulate those accesses in userspace,
+			 * but userspace can really only figure out the end frequency.
+			 * We could decompose that into the factors that compute it, but
+			 * that's tricky math, and it's easier to just report the real
+			 * CPR0 values.
+			 */
+			switch (dcrn) {
+			case DCRN_CPR0_CONFIG_ADDR:
+				vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+				break;
+			case DCRN_CPR0_CONFIG_DATA:
+				local_irq_disable();
+				mtdcr(DCRN_CPR0_CONFIG_ADDR,
+				      vcpu->arch.cpr0_cfgaddr);
+				vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+				local_irq_enable();
+				break;
+			default:
+				run->dcr.dcrn = dcrn;
+				run->dcr.data = 0;
+				run->dcr.is_write = 0;
+				vcpu->arch.io_gpr = rt;
+				vcpu->arch.dcr_needed = 1;
+				emulated = EMULATE_DO_DCR;
+			}
+
+			break;
+
+		case XOP_MTDCR:
+			dcrn = get_dcrn(inst);
+			rs = get_rs(inst);
+
+			/* emulate some access in kernel */
+			switch (dcrn) {
+			case DCRN_CPR0_CONFIG_ADDR:
+				vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+				break;
+			default:
+				run->dcr.dcrn = dcrn;
+				run->dcr.data = vcpu->arch.gpr[rs];
+				run->dcr.is_write = 1;
+				vcpu->arch.dcr_needed = 1;
+				emulated = EMULATE_DO_DCR;
+			}
+
+			break;
+
+		case XOP_TLBWE:
+			ra = get_ra(inst);
+			rs = get_rs(inst);
+			ws = get_ws(inst);
+			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
+			break;
+
+		case XOP_TLBSX:
+			rt = get_rt(inst);
+			ra = get_ra(inst);
+			rb = get_rb(inst);
+			rc = get_rc(inst);
+			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
+			break;
+
+		case XOP_ICCCI:
+			break;
+
+		default:
+			emulated = EMULATE_FAIL;
+		}
+
+		break;
+
+	default:
+		emulated = EMULATE_FAIL;
+	}
+
+	return emulated;
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+	switch (sprn) {
+	case SPRN_MMUCR:
+		vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+	case SPRN_PID:
+		kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
+	case SPRN_CCR0:
+		vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+	case SPRN_CCR1:
+		vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+	case SPRN_DEAR:
+		vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+	case SPRN_ESR:
+		vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+	case SPRN_DBCR0:
+		vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+	case SPRN_DBCR1:
+		vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+	case SPRN_TSR:
+		vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+	case SPRN_TCR:
+		vcpu->arch.tcr = vcpu->arch.gpr[rs];
+		kvmppc_emulate_dec(vcpu);
+		break;
+
+	/* Note: SPRG4-7 are user-readable. These values are
+	 * loaded into the real SPRGs when resuming the
+	 * guest. */
+	case SPRN_SPRG4:
+		vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+	case SPRN_SPRG5:
+		vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+	case SPRN_SPRG6:
+		vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+	case SPRN_SPRG7:
+		vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+
+	case SPRN_IVPR:
+		vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR0:
+		vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR1:
+		vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR2:
+		vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR3:
+		vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR4:
+		vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR5:
+		vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR6:
+		vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR7:
+		vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR8:
+		vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR9:
+		vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR10:
+		vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR11:
+		vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR12:
+		vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR13:
+		vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR14:
+		vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
+	case SPRN_IVOR15:
+		vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
+
+	default:
+		return EMULATE_FAIL;
+	}
+
+	return EMULATE_DONE;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+	switch (sprn) {
+	/* 440 */
+	case SPRN_MMUCR:
+		vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+	case SPRN_CCR0:
+		vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+	case SPRN_CCR1:
+		vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+
+	/* Book E */
+	case SPRN_PID:
+		vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+	case SPRN_IVPR:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+	case SPRN_DEAR:
+		vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+	case SPRN_ESR:
+		vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+	case SPRN_DBCR0:
+		vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+	case SPRN_DBCR1:
+		vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+
+	case SPRN_IVOR0:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
+	case SPRN_IVOR1:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
+	case SPRN_IVOR2:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
+	case SPRN_IVOR3:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
+	case SPRN_IVOR4:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
+	case SPRN_IVOR5:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
+	case SPRN_IVOR6:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
+	case SPRN_IVOR7:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
+	case SPRN_IVOR8:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
+	case SPRN_IVOR9:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
+	case SPRN_IVOR10:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
+	case SPRN_IVOR11:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
+	case SPRN_IVOR12:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
+	case SPRN_IVOR13:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
+	case SPRN_IVOR14:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
+	case SPRN_IVOR15:
+		vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
+	default:
+		return EMULATE_FAIL;
+	}
+
+	return EMULATE_DONE;
+}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5152fe5b2a9b..bb6da134cadb 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -301,7 +301,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
+int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 {
 	u64 eaddr;
 	u64 raddr;
@@ -363,7 +363,7 @@ int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	return EMULATE_DONE;
 }
 
-int kvmppc_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
+int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
 {
 	u32 ea;
 	int index;
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index 357d79ae5493..b1029af3de20 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -31,6 +31,10 @@ extern struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
                                               gva_t eaddr);
 extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
 
+extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
+                                 u8 rc);
+extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
+
 /* TLB helper functions */
 static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
 {
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index f5e33756f318..f045fad0f4f1 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -13,5 +13,10 @@ obj-$(CONFIG_KVM) += kvm.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
-kvm-440-objs := booke.o booke_interrupts.o 44x.o 44x_tlb.o
+kvm-440-objs := \
+	booke.o \
+	booke_interrupts.o \
+	44x.o \
+	44x_tlb.o \
+	44x_emulate.o
 obj-$(CONFIG_KVM_440) += kvm-440.o
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 138014acf3cf..ea630095e280 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -29,6 +29,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/cacheflush.h>
 
+#include "booke.h"
 #include "44x_tlb.h"
 
 unsigned long kvmppc_booke_handlers;
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
new file mode 100644
index 000000000000..f694a4b2dafa
--- /dev/null
+++ b/arch/powerpc/kvm/booke.h
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __KVM_BOOKE_H__
+#define __KVM_BOOKE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+	vcpu->arch.msr = new_msr;
+
+	if (vcpu->arch.msr & MSR_WE)
+		kvm_vcpu_block(vcpu);
+}
+
+#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 5fd9cf779be5..30a49f8c49b2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,14 +23,13 @@
 #include <linux/string.h>
 #include <linux/kvm_host.h>
 
-#include <asm/dcr.h>
-#include <asm/dcr-regs.h>
+#include <asm/reg.h>
 #include <asm/time.h>
 #include <asm/byteorder.h>
 #include <asm/kvm_ppc.h>
 #include <asm/disassemble.h>
 
-static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
+void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.tcr & TCR_DIE) {
 		/* The decrementer ticks at the same rate as the timebase, so
@@ -46,12 +45,6 @@ static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.pc = vcpu->arch.srr0;
-	kvmppc_set_msr(vcpu, vcpu->arch.srr1);
-}
-
 /* XXX to do:
  * lhax
  * lhaux
@@ -66,18 +59,17 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
  *
  * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
+/* XXX Should probably auto-generate instruction decoding for a particular core
+ * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	u32 inst = vcpu->arch.last_inst;
 	u32 ea;
 	int ra;
 	int rb;
-	int rc;
 	int rs;
 	int rt;
-	int ws;
 	int sprn;
-	int dcrn;
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
 
@@ -88,19 +80,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		advance = 0;
 		break;
 
-	case 19:
-		switch (get_xop(inst)) {
-		case 50:                                        /* rfi */
-			kvmppc_emul_rfi(vcpu);
-			advance = 0;
-			break;
-
-		default:
-			emulated = EMULATE_FAIL;
-			break;
-		}
-		break;
-
 	case 31:
 		switch (get_xop(inst)) {
 
@@ -109,27 +88,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 			break;
 
-		case 83:                                        /* mfmsr */
-			rt = get_rt(inst);
-			vcpu->arch.gpr[rt] = vcpu->arch.msr;
-			break;
-
 		case 87:                                        /* lbzx */
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 			break;
 
-		case 131:                                       /* wrtee */
-			rs = get_rs(inst);
-			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
-					| (vcpu->arch.gpr[rs] & MSR_EE);
-			break;
-
-		case 146:                                       /* mtmsr */
-			rs = get_rs(inst);
-			kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
-			break;
-
 		case 151:                                       /* stwx */
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
@@ -137,11 +100,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			                               4, 1);
 			break;
 
-		case 163:                                       /* wrteei */
-			vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
-					| (inst & MSR_EE);
-			break;
-
 		case 215:                                       /* stbx */
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
@@ -182,42 +140,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			vcpu->arch.gpr[ra] = ea;
 			break;
 
-		case 323:                                       /* mfdcr */
-			dcrn = get_dcrn(inst);
-			rt = get_rt(inst);
-
-			/* The guest may access CPR0 registers to determine the timebase
-			 * frequency, and it must know the real host frequency because it
-			 * can directly access the timebase registers.
-			 *
-			 * It would be possible to emulate those accesses in userspace,
-			 * but userspace can really only figure out the end frequency.
-			 * We could decompose that into the factors that compute it, but
-			 * that's tricky math, and it's easier to just report the real
-			 * CPR0 values.
-			 */
-			switch (dcrn) {
-			case DCRN_CPR0_CONFIG_ADDR:
-				vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
-				break;
-			case DCRN_CPR0_CONFIG_DATA:
-				local_irq_disable();
-				mtdcr(DCRN_CPR0_CONFIG_ADDR,
-				      vcpu->arch.cpr0_cfgaddr);
-				vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
-				local_irq_enable();
-				break;
-			default:
-				run->dcr.dcrn = dcrn;
-				run->dcr.data = 0;
-				run->dcr.is_write = 0;
-				vcpu->arch.io_gpr = rt;
-				vcpu->arch.dcr_needed = 1;
-				emulated = EMULATE_DO_DCR;
-			}
-
-			break;
-
 		case 339:                                       /* mfspr */
 			sprn = get_sprn(inst);
 			rt = get_rt(inst);
@@ -227,26 +149,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
 			case SPRN_SRR1:
 				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
-			case SPRN_MMUCR:
-				vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
-			case SPRN_PID:
-				vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
-			case SPRN_IVPR:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
-			case SPRN_CCR0:
-				vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
-			case SPRN_CCR1:
-				vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
 			case SPRN_PVR:
 				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
-			case SPRN_DEAR:
-				vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
-			case SPRN_ESR:
-				vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
-			case SPRN_DBCR0:
-				vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
-			case SPRN_DBCR1:
-				vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
 
 			/* Note: mftb and TBRL/TBWL are user-accessible, so
 			 * the guest can always access the real TB anyways.
@@ -267,42 +171,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			/* Note: SPRG4-7 are user-readable, so we don't get
 			 * a trap. */
 
-			case SPRN_IVOR0:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
-			case SPRN_IVOR1:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
-			case SPRN_IVOR2:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
-			case SPRN_IVOR3:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
-			case SPRN_IVOR4:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
-			case SPRN_IVOR5:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
-			case SPRN_IVOR6:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
-			case SPRN_IVOR7:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
-			case SPRN_IVOR8:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
-			case SPRN_IVOR9:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
-			case SPRN_IVOR10:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
-			case SPRN_IVOR11:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
-			case SPRN_IVOR12:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
-			case SPRN_IVOR13:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
-			case SPRN_IVOR14:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
-			case SPRN_IVOR15:
-				vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
-
 			default:
-				printk("mfspr: unknown spr %x\n", sprn);
-				vcpu->arch.gpr[rt] = 0;
+				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
+				if (emulated == EMULATE_FAIL) {
+					printk("mfspr: unknown spr %x\n", sprn);
+					vcpu->arch.gpr[rt] = 0;
+				}
 				break;
 			}
 			break;
@@ -332,25 +206,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			vcpu->arch.gpr[ra] = ea;
 			break;
 
-		case 451:                                       /* mtdcr */
-			dcrn = get_dcrn(inst);
-			rs = get_rs(inst);
-
-			/* emulate some access in kernel */
-			switch (dcrn) {
-			case DCRN_CPR0_CONFIG_ADDR:
-				vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
-				break;
-			default:
-				run->dcr.dcrn = dcrn;
-				run->dcr.data = vcpu->arch.gpr[rs];
-				run->dcr.is_write = 1;
-				vcpu->arch.dcr_needed = 1;
-				emulated = EMULATE_DO_DCR;
-			}
-
-			break;
-
 		case 467:                                       /* mtspr */
 			sprn = get_sprn(inst);
 			rs = get_rs(inst);
@@ -359,22 +214,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
 			case SPRN_SRR1:
 				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
-			case SPRN_MMUCR:
-				vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
-			case SPRN_PID:
-				kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
-			case SPRN_CCR0:
-				vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
-			case SPRN_CCR1:
-				vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
-			case SPRN_DEAR:
-				vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
-			case SPRN_ESR:
-				vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
-			case SPRN_DBCR0:
-				vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
-			case SPRN_DBCR1:
-				vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
 
 			/* XXX We need to context-switch the timebase for
 			 * watchdog and FIT. */
@@ -386,14 +225,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				kvmppc_emulate_dec(vcpu);
 				break;
 
-			case SPRN_TSR:
-				vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
-
-			case SPRN_TCR:
-				vcpu->arch.tcr = vcpu->arch.gpr[rs];
-				kvmppc_emulate_dec(vcpu);
-				break;
-
 			case SPRN_SPRG0:
 				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
 			case SPRN_SPRG1:
@@ -403,56 +234,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_SPRG3:
 				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
 
-			/* Note: SPRG4-7 are user-readable. These values are
-			 * loaded into the real SPRGs when resuming the
-			 * guest. */
-			case SPRN_SPRG4:
-				vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SPRG5:
-				vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SPRG6:
-				vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SPRG7:
-				vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
-
-			case SPRN_IVPR:
-				vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR0:
-				vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR1:
-				vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR2:
-				vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR3:
-				vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR4:
-				vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR5:
-				vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR6:
-				vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR7:
-				vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR8:
-				vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR9:
-				vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR10:
-				vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR11:
-				vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR12:
-				vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR13:
-				vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR14:
-				vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
-			case SPRN_IVOR15:
-				vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
-
 			default:
-				printk("mtspr: unknown spr %x\n", sprn);
-				emulated = EMULATE_FAIL;
+				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+				if (emulated == EMULATE_FAIL)
+					printk("mtspr: unknown spr %x\n", sprn);
 				break;
 			}
 			break;
@@ -483,21 +268,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			                               4, 0);
 			break;
 
-		case 978:                                       /* tlbwe */
-			ra = get_ra(inst);
-			rs = get_rs(inst);
-			ws = get_ws(inst);
-			emulated = kvmppc_emul_tlbwe(vcpu, ra, rs, ws);
-			break;
-
-		case 914:                                       /* tlbsx */
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-			rc = get_rc(inst);
-			emulated = kvmppc_emul_tlbsx(vcpu, rt, ra, rb, rc);
-			break;
-
 		case 790:                                       /* lhbrx */
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
@@ -513,14 +283,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			                               2, 0);
 			break;
 
-		case 966:                                       /* iccci */
-			break;
-
 		default:
-			printk("unknown: op %d xop %d\n", get_op(inst),
-			       get_xop(inst));
+			/* Attempt core-specific emulation below. */
 			emulated = EMULATE_FAIL;
-			break;
 		}
 		break;
 
@@ -603,9 +368,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	default:
-		printk("unknown op %d\n", get_op(inst));
 		emulated = EMULATE_FAIL;
-		break;
+	}
+
+	if (emulated == EMULATE_FAIL) {
+		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
+		if (emulated == EMULATE_FAIL) {
+			advance = 0;
+			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
+			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+		}
 	}
 
 	KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);