Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
-rw-r--r--  arch/powerpc/kvm/emulate.c  66
1 files changed, 55 insertions, 11 deletions
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 7737146af3fb..4a9ac6640fad 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -18,7 +18,7 @@
  */
 
 #include <linux/jiffies.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kvm_host.h>
@@ -32,6 +32,7 @@
 #include "trace.h"
 
 #define OP_TRAP 3
+#define OP_TRAP_64 2
 
 #define OP_31_XOP_LWZX 23
 #define OP_31_XOP_LBZX 87
@@ -64,19 +65,45 @@
 #define OP_STH  44
 #define OP_STHU 45
 
+#ifdef CONFIG_PPC64
+static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+#else
+static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.tcr & TCR_DIE;
+}
+#endif
+
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.tcr & TCR_DIE) {
+	unsigned long dec_nsec;
+
+	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
+#ifdef CONFIG_PPC64
+	/* POWER4+ triggers a dec interrupt if the value is < 0 */
+	if (vcpu->arch.dec & 0x80000000) {
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+		kvmppc_core_queue_dec(vcpu);
+		return;
+	}
+#endif
+	if (kvmppc_dec_enabled(vcpu)) {
 		/* The decrementer ticks at the same rate as the timebase, so
 		 * that's how we convert the guest DEC value to the number of
 		 * host ticks. */
-		unsigned long nr_jiffies;
 
-		nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
-		mod_timer(&vcpu->arch.dec_timer,
-			  get_jiffies_64() + nr_jiffies);
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+		dec_nsec = vcpu->arch.dec;
+		dec_nsec *= 1000;
+		dec_nsec /= tb_ticks_per_usec;
+		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
+			      HRTIMER_MODE_REL);
+		vcpu->arch.dec_jiffies = get_tb();
 	} else {
-		del_timer(&vcpu->arch.dec_timer);
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
 	}
 }
 
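The arming path above converts the guest DEC value, which counts timebase ticks, into nanoseconds for hrtimer_start(). A minimal user-space sketch of that arithmetic, with the kernel symbols replaced by local stand-ins and an assumed timebase rate of 512 ticks per microsecond:

#include <stdio.h>

int main(void)
{
	unsigned long tb_ticks_per_usec = 512;	/* assumed timebase rate (not the kernel variable) */
	unsigned int dec = 0x00100000;		/* example value the guest wrote to DEC */
	unsigned long dec_nsec;

	/* ticks * 1000 / ticks-per-microsecond = nanoseconds */
	dec_nsec = dec;
	dec_nsec *= 1000;
	dec_nsec /= tb_ticks_per_usec;

	printf("DEC %#x expires in about %lu ns\n", dec, dec_nsec);
	return 0;
}

With these example numbers the timer would be armed roughly 2 ms in the future.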
@@ -111,9 +138,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
+	pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
+
 	switch (get_op(inst)) {
 	case OP_TRAP:
+#ifdef CONFIG_PPC64
+	case OP_TRAP_64:
+#else
 		vcpu->arch.esr |= ESR_PTR;
+#endif
 		kvmppc_core_queue_program(vcpu);
 		advance = 0;
 		break;
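The pr_debug() added above logs the primary and extended opcode of the trapped instruction. As an illustration of what get_op()/get_xop() report, here is a stand-alone sketch using hypothetical helpers that follow the usual PowerPC encoding (primary opcode in the top 6 bits, extended opcode in bits 21-30):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel helpers, not the kernel API. */
static unsigned int op(uint32_t inst)  { return inst >> 26; }	/* primary opcode */
static unsigned int xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }	/* extended opcode */

int main(void)
{
	uint32_t trap = 0x0fe00000;	/* example: twi 31,0,0 (trap word immediate, primary opcode 3) */

	printf("Emulating opcode %u / %u\n", op(trap), xop(trap));
	return 0;
}

For this example it prints "Emulating opcode 3 / 0", and 3 matches the OP_TRAP case in the switch above.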
@@ -188,17 +221,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_SRR1:
 				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
 			case SPRN_PVR:
-				vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break;
+				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
 			case SPRN_PIR:
-				vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break;
+				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+			case SPRN_MSSSR0:
+				vcpu->arch.gpr[rt] = 0; break;
 
 			/* Note: mftb and TBRL/TBWL are user-accessible, so
 			 * the guest can always access the real TB anyways.
 			 * In fact, we probably will never see these traps. */
 			case SPRN_TBWL:
-				vcpu->arch.gpr[rt] = mftbl(); break;
+				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
 			case SPRN_TBWU:
-				vcpu->arch.gpr[rt] = mftbu(); break;
+				vcpu->arch.gpr[rt] = get_tb(); break;
 
 			case SPRN_SPRG0:
 				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
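The SPRN_TBWL/SPRN_TBWU reads above now derive the guest-visible values from the 64-bit result of get_tb() rather than reading the host's timebase registers individually. A small stand-alone sketch of splitting one 64-bit timebase sample into 32-bit halves, with get_tb() replaced by a fixed example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb = 0x0000004200abcdefULL;	/* stand-in for get_tb() */

	uint32_t upper = (uint32_t)(tb >> 32);	/* high word of the timebase */
	uint32_t lower = (uint32_t)tb;		/* low word of the timebase */

	printf("timebase %#llx: upper %#x, lower %#x\n",
	       (unsigned long long)tb, upper, lower);
	return 0;
}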
@@ -211,6 +246,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			/* Note: SPRG4-7 are user-readable, so we don't get
 			 * a trap. */
 
+			case SPRN_DEC:
+			{
+				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
+				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
+				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+				break;
+			}
 			default:
 				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
 				if (emulated == EMULATE_FAIL) {
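The new SPRN_DEC read above reconstructs the current DEC value as the value the guest programmed minus the timebase ticks elapsed since it was written (the snapshot kept in dec_jiffies). A minimal user-space sketch of that calculation, with made-up numbers standing in for get_tb() and the vcpu fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_at_write = 1000000;	/* stand-in for vcpu->arch.dec_jiffies */
	uint64_t tb_now = 1003000;	/* stand-in for get_tb() */
	uint32_t dec_written = 0x8000;	/* example value the guest loaded into DEC */

	uint64_t elapsed = tb_now - tb_at_write;	/* timebase ticks since mtDEC */
	uint64_t dec_now = dec_written - elapsed;	/* value the guest reads back */

	printf("mfDEC: %#x - %llu = %#llx\n", dec_written,
	       (unsigned long long)elapsed, (unsigned long long)dec_now);
	return 0;
}

With these example numbers the guest reads back 0x7448, i.e. the programmed value minus the 3000 elapsed timebase ticks.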
@@ -260,6 +302,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			case SPRN_TBWL: break;
 			case SPRN_TBWU: break;
 
+			case SPRN_MSSSR0: break;
+
 			case SPRN_DEC:
 				vcpu->arch.dec = vcpu->arch.gpr[rs];
 				kvmppc_emulate_dec(vcpu);