Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--  arch/powerpc/kvm/emulate.c  170
1 file changed, 115 insertions(+), 55 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 7737146af3fb..cb72a65f4ecc 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -18,7 +18,7 @@
  */
 
 #include <linux/jiffies.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kvm_host.h>
@@ -32,6 +32,7 @@
 #include "trace.h"
 
 #define OP_TRAP 3
+#define OP_TRAP_64 2
 
 #define OP_31_XOP_LWZX 23
 #define OP_31_XOP_LBZX 87
@@ -64,19 +65,48 @@
 #define OP_STH 44
 #define OP_STHU 45
 
+#ifdef CONFIG_PPC64
+static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
+{
+        return 1;
+}
+#else
+static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.tcr & TCR_DIE;
+}
+#endif
+
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
-        if (vcpu->arch.tcr & TCR_DIE) {
+        unsigned long dec_nsec;
+
+        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
+#ifdef CONFIG_PPC64
+        /* mtdec lowers the interrupt line when positive. */
+        kvmppc_core_dequeue_dec(vcpu);
+
+        /* POWER4+ triggers a dec interrupt if the value is < 0 */
+        if (vcpu->arch.dec & 0x80000000) {
+                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+                kvmppc_core_queue_dec(vcpu);
+                return;
+        }
+#endif
+        if (kvmppc_dec_enabled(vcpu)) {
                 /* The decrementer ticks at the same rate as the timebase, so
                  * that's how we convert the guest DEC value to the number of
                  * host ticks. */
-                unsigned long nr_jiffies;
 
-                nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
-                mod_timer(&vcpu->arch.dec_timer,
-                          get_jiffies_64() + nr_jiffies);
+                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+                dec_nsec = vcpu->arch.dec;
+                dec_nsec *= 1000;
+                dec_nsec /= tb_ticks_per_usec;
+                hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
+                              HRTIMER_MODE_REL);
+                vcpu->arch.dec_jiffies = get_tb();
         } else {
-                del_timer(&vcpu->arch.dec_timer);
+                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
         }
 }
 
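Note: the rewrite above replaces the jiffies-based timer with an hrtimer. The guest DEC counts timebase ticks, so the expiry has to be converted to nanoseconds before the timer is armed; that is what the dec_nsec arithmetic does. A standalone sketch of the conversion, assuming an illustrative tb_ticks_per_usec of 512 (the kernel derives the real value from the platform at boot):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t dec = 0x00100000;             /* guest-written DEC value, in timebase ticks */
            unsigned long tb_ticks_per_usec = 512; /* assumed 512 MHz timebase, for illustration */

            /* Widen before multiplying so dec * 1000 cannot overflow 32 bits. */
            uint64_t dec_nsec = (uint64_t)dec * 1000 / tb_ticks_per_usec;

            printf("DEC %u ticks -> %llu ns\n", dec, (unsigned long long)dec_nsec);
            return 0;
    }

The kernel code performs the same ticks * 1000 / tb_ticks_per_usec computation and hands the result to hrtimer_start() via ktime_set().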
@@ -111,10 +141,20 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
         /* this default type might be overwritten by subcategories */
         kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
+        pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
+
+        /* Try again next time */
+        if (inst == KVM_INST_FETCH_FAILED)
+                return EMULATE_DONE;
+
         switch (get_op(inst)) {
         case OP_TRAP:
-                vcpu->arch.esr |= ESR_PTR;
-                kvmppc_core_queue_program(vcpu);
+#ifdef CONFIG_PPC64
+        case OP_TRAP_64:
+                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
+#else
+                kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
+#endif
                 advance = 0;
                 break;
 
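Note: OP_TRAP_64 is primary opcode 2, the 64-bit trap-immediate instruction (tdi), handled alongside the 32-bit twi (opcode 3) that was already here. The primary opcode is the top 6 bits of the instruction word, which is what get_op() extracts. A minimal sketch of that decoding; the two encodings below are illustrative twi/tdi forms, not taken from the diff:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the kernel's get_op(): the top 6 bits select the primary opcode. */
    static inline int get_op(uint32_t inst)
    {
            return inst >> 26;
    }

    int main(void)
    {
            uint32_t twi = 0x0c200000;      /* twi 1,r0,0 -> primary opcode 3 */
            uint32_t tdi = 0x08200000;      /* tdi 1,r0,0 -> primary opcode 2 */

            printf("twi op=%d, tdi op=%d\n", get_op(twi), get_op(tdi));
            return 0;
    }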
@@ -134,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 case OP_31_XOP_STWX:
                         rs = get_rs(inst);
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        4, 1);
                         break;
 
                 case OP_31_XOP_STBX:
                         rs = get_rs(inst);
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        1, 1);
                         break;
 
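Note: from this hunk onward the patch mechanically converts every direct vcpu->arch.gpr[] access into a kvmppc_get_gpr()/kvmppc_set_gpr() call. The accessor bodies are not part of this diff; a minimal sketch of what they plausibly look like, given that each call replaces an array access one-for-one (the struct stand-ins below are assumptions, not the kernel's definitions):

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types, for illustration only. */
    struct kvm_vcpu_arch { unsigned long gpr[32]; };
    struct kvm_vcpu { struct kvm_vcpu_arch arch; };

    /* Assumed accessor bodies: thin wrappers over the array access they replace. */
    static inline unsigned long kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
    {
            return vcpu->arch.gpr[num];
    }

    static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num,
                                      unsigned long val)
    {
            vcpu->arch.gpr[num] = val;
    }

    int main(void)
    {
            static struct kvm_vcpu vcpu;    /* static, so zero-initialized */

            kvmppc_set_gpr(&vcpu, 3, 0xdeadbeef);
            printf("r3 = 0x%lx\n", kvmppc_get_gpr(&vcpu, 3));
            return 0;
    }

Funneling all register access through one accessor pair presumably lets the storage behind the registers change later without touching every call site again.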
@@ -150,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         ra = get_ra(inst);
                         rb = get_rb(inst);
 
-                        ea = vcpu->arch.gpr[rb];
+                        ea = kvmppc_get_gpr(vcpu, rb);
                         if (ra)
-                                ea += vcpu->arch.gpr[ra];
+                                ea += kvmppc_get_gpr(vcpu, ra);
 
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        1, 1);
-                        vcpu->arch.gpr[rs] = ea;
+                        kvmppc_set_gpr(vcpu, rs, ea);
                         break;
 
                 case OP_31_XOP_LHZX:
@@ -170,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         ra = get_ra(inst);
                         rb = get_rb(inst);
 
-                        ea = vcpu->arch.gpr[rb];
+                        ea = kvmppc_get_gpr(vcpu, rb);
                         if (ra)
-                                ea += vcpu->arch.gpr[ra];
+                                ea += kvmppc_get_gpr(vcpu, ra);
 
                         emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-                        vcpu->arch.gpr[ra] = ea;
+                        kvmppc_set_gpr(vcpu, ra, ea);
                         break;
 
                 case OP_31_XOP_MFSPR:
@@ -184,38 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
                         switch (sprn) {
                         case SPRN_SRR0:
-                                vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
                         case SPRN_SRR1:
-                                vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
                         case SPRN_PVR:
-                                vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                         case SPRN_PIR:
-                                vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+                        case SPRN_MSSSR0:
+                                kvmppc_set_gpr(vcpu, rt, 0); break;
 
                         /* Note: mftb and TBRL/TBWL are user-accessible, so
                          * the guest can always access the real TB anyways.
                          * In fact, we probably will never see these traps. */
                         case SPRN_TBWL:
-                                vcpu->arch.gpr[rt] = mftbl(); break;
+                                kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
                         case SPRN_TBWU:
-                                vcpu->arch.gpr[rt] = mftbu(); break;
+                                kvmppc_set_gpr(vcpu, rt, get_tb()); break;
 
                         case SPRN_SPRG0:
-                                vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
                         case SPRN_SPRG1:
-                                vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
                         case SPRN_SPRG2:
-                                vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
                         case SPRN_SPRG3:
-                                vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
                         /* Note: SPRG4-7 are user-readable, so we don't get
                          * a trap. */
 
+                        case SPRN_DEC:
+                        {
+                                u64 jd = get_tb() - vcpu->arch.dec_jiffies;
+                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
+                                pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
+                                         vcpu->arch.dec, jd,
+                                         kvmppc_get_gpr(vcpu, rt));
+                                break;
+                        }
                         default:
                                 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                 if (emulated == EMULATE_FAIL) {
                                         printk("mfspr: unknown spr %x\n", sprn);
-                                        vcpu->arch.gpr[rt] = 0;
+                                        kvmppc_set_gpr(vcpu, rt, 0);
                                 }
                                 break;
                         }
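Note: the new SPRN_DEC case computes the value a guest would observe on mfdec: the DEC written at mtdec, minus the timebase ticks that have elapsed since. Despite its name, dec_jiffies now holds a get_tb() snapshot taken in kvmppc_emulate_dec(). A standalone sketch of the arithmetic with illustrative numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t dec_written = 5000000; /* value the guest wrote via mtdec */
            uint64_t tb_at_mtdec = 1000000; /* get_tb() sampled when DEC was written */
            uint64_t tb_now      = 3500000; /* get_tb() at the mfdec trap */

            uint64_t elapsed = tb_now - tb_at_mtdec;
            uint32_t dec_now = dec_written - (uint32_t)elapsed;

            printf("mfDEC: %u - %llu = %u\n", dec_written,
                   (unsigned long long)elapsed, dec_now);
            return 0;
    }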
@@ -227,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         rb = get_rb(inst);
 
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        2, 1);
                         break;
 
@@ -236,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         ra = get_ra(inst);
                         rb = get_rb(inst);
 
-                        ea = vcpu->arch.gpr[rb];
+                        ea = kvmppc_get_gpr(vcpu, rb);
                         if (ra)
-                                ea += vcpu->arch.gpr[ra];
+                                ea += kvmppc_get_gpr(vcpu, ra);
 
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        2, 1);
-                        vcpu->arch.gpr[ra] = ea;
+                        kvmppc_set_gpr(vcpu, ra, ea);
                         break;
 
                 case OP_31_XOP_MTSPR:
@@ -251,28 +302,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         rs = get_rs(inst);
                         switch (sprn) {
                         case SPRN_SRR0:
-                                vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+                                vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
                         case SPRN_SRR1:
-                                vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+                                vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
 
                         /* XXX We need to context-switch the timebase for
                          * watchdog and FIT. */
                         case SPRN_TBWL: break;
                         case SPRN_TBWU: break;
 
+                        case SPRN_MSSSR0: break;
+
                         case SPRN_DEC:
-                                vcpu->arch.dec = vcpu->arch.gpr[rs];
+                                vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
                                 kvmppc_emulate_dec(vcpu);
                                 break;
 
                         case SPRN_SPRG0:
-                                vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+                                vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
                         case SPRN_SPRG1:
-                                vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+                                vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
                         case SPRN_SPRG2:
-                                vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+                                vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
                         case SPRN_SPRG3:
-                                vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+                                vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
 
                         default:
                                 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
@@ -304,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         rb = get_rb(inst);
 
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        4, 0);
                         break;
 
@@ -319,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         rb = get_rb(inst);
 
                         emulated = kvmppc_handle_store(run, vcpu,
-                                                       vcpu->arch.gpr[rs],
+                                                       kvmppc_get_gpr(vcpu, rs),
                                                        2, 0);
                         break;
 
@@ -338,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                 break;
 
         case OP_LBZ:
@@ -350,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                 break;
 
         case OP_STW:
                 rs = get_rs(inst);
-                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                emulated = kvmppc_handle_store(run, vcpu,
+                                               kvmppc_get_gpr(vcpu, rs),
                                                4, 1);
                 break;
 
         case OP_STWU:
                 ra = get_ra(inst);
                 rs = get_rs(inst);
-                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                emulated = kvmppc_handle_store(run, vcpu,
+                                               kvmppc_get_gpr(vcpu, rs),
                                                4, 1);
-                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                 break;
 
         case OP_STB:
                 rs = get_rs(inst);
-                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                emulated = kvmppc_handle_store(run, vcpu,
+                                               kvmppc_get_gpr(vcpu, rs),
                                                1, 1);
                 break;
 
         case OP_STBU:
                 ra = get_ra(inst);
                 rs = get_rs(inst);
-                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                emulated = kvmppc_handle_store(run, vcpu,
+                                               kvmppc_get_gpr(vcpu, rs),
                                                1, 1);
-                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                 break;
 
         case OP_LHZ:
@@ -390,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 ra = get_ra(inst);
                 rt = get_rt(inst);
                 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                 break;
 
         case OP_STH:
                 rs = get_rs(inst);
-                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                emulated = kvmppc_handle_store(run, vcpu,
+                                               kvmppc_get_gpr(vcpu, rs),
                                                2, 1);
                 break;
 
         case OP_STHU:
                 ra = get_ra(inst);
                 rs = get_rs(inst);
-                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                emulated = kvmppc_handle_store(run, vcpu,
+                                               kvmppc_get_gpr(vcpu, rs),
                                                2, 1);
-                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                 break;
 
         default:
@@ -417,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 advance = 0;
                 printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                        "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+                kvmppc_core_queue_program(vcpu, 0);
         }
 }
 
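Note: the added kvmppc_core_queue_program(vcpu, 0) reflects a program-check interrupt into the guest when nothing could emulate the instruction, which is what the guest would see for an illegal instruction on bare metal; the flags argument is 0 because no trap bits apply. Since advance stays 0, the guest PC is not stepped past the instruction. A sketch of the PC-advance step that follows this switch in kvmppc_emulate_instruction() (an assumption based on how advance is used above; the diff does not show it):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pc = 0x100;       /* guest PC of the trapped instruction */
            int advance = 0;                /* cleared whenever an interrupt was queued */

            if (advance)
                    pc += 4;                /* PowerPC instructions are 4 bytes */

            printf("next guest pc = 0x%lx\n", pc);
            return 0;
    }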