about summary refs log tree commit diff stats
path: root/arch/powerpc/kernel/kvm.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel/kvm.c')
-rw-r--r--  arch/powerpc/kernel/kvm.c  39
1 files changed, 32 insertions, 7 deletions
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index c8bab24ff8a..10b681c092e 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -42,6 +42,7 @@
42#define KVM_INST_B_MAX 0x01ffffff 42#define KVM_INST_B_MAX 0x01ffffff
43 43
44#define KVM_MASK_RT 0x03e00000 44#define KVM_MASK_RT 0x03e00000
45#define KVM_RT_30 0x03c00000
45#define KVM_MASK_RB 0x0000f800 46#define KVM_MASK_RB 0x0000f800
46#define KVM_INST_MFMSR 0x7c0000a6 47#define KVM_INST_MFMSR 0x7c0000a6
47#define KVM_INST_MFSPR_SPRG0 0x7c1042a6 48#define KVM_INST_MFSPR_SPRG0 0x7c1042a6
@@ -82,6 +83,15 @@ static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
82 flush_icache_range((ulong)inst, (ulong)inst + 4); 83 flush_icache_range((ulong)inst, (ulong)inst + 4);
83} 84}
84 85
86static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
87{
88#ifdef CONFIG_64BIT
89 kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
90#else
91 kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
92#endif
93}
94
85static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) 95static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
86{ 96{
87#ifdef CONFIG_64BIT 97#ifdef CONFIG_64BIT
@@ -186,7 +196,6 @@ static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
186extern u32 kvm_emulate_mtmsr_branch_offs; 196extern u32 kvm_emulate_mtmsr_branch_offs;
187extern u32 kvm_emulate_mtmsr_reg1_offs; 197extern u32 kvm_emulate_mtmsr_reg1_offs;
188extern u32 kvm_emulate_mtmsr_reg2_offs; 198extern u32 kvm_emulate_mtmsr_reg2_offs;
189extern u32 kvm_emulate_mtmsr_reg3_offs;
190extern u32 kvm_emulate_mtmsr_orig_ins_offs; 199extern u32 kvm_emulate_mtmsr_orig_ins_offs;
191extern u32 kvm_emulate_mtmsr_len; 200extern u32 kvm_emulate_mtmsr_len;
192extern u32 kvm_emulate_mtmsr[]; 201extern u32 kvm_emulate_mtmsr[];
@@ -216,9 +225,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
216 /* Modify the chunk to fit the invocation */ 225 /* Modify the chunk to fit the invocation */
217 memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4); 226 memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
218 p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK; 227 p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
219 p[kvm_emulate_mtmsr_reg1_offs] |= rt; 228
220 p[kvm_emulate_mtmsr_reg2_offs] |= rt; 229 /* Make clobbered registers work too */
221 p[kvm_emulate_mtmsr_reg3_offs] |= rt; 230 switch (get_rt(rt)) {
231 case 30:
232 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
233 magic_var(scratch2), KVM_RT_30);
234 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
235 magic_var(scratch2), KVM_RT_30);
236 break;
237 case 31:
238 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
239 magic_var(scratch1), KVM_RT_30);
240 kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
241 magic_var(scratch1), KVM_RT_30);
242 break;
243 default:
244 p[kvm_emulate_mtmsr_reg1_offs] |= rt;
245 p[kvm_emulate_mtmsr_reg2_offs] |= rt;
246 break;
247 }
248
222 p[kvm_emulate_mtmsr_orig_ins_offs] = *inst; 249 p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
223 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4); 250 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
224 251
@@ -402,9 +429,7 @@ static void kvm_check_ins(u32 *inst, u32 features)
402 break; 429 break;
403 case KVM_INST_MTMSR: 430 case KVM_INST_MTMSR:
404 case KVM_INST_MTMSRD_L0: 431 case KVM_INST_MTMSRD_L0:
405 /* We use r30 and r31 during the hook */ 432 kvm_patch_ins_mtmsr(inst, inst_rt);
406 if (get_rt(inst_rt) < 30)
407 kvm_patch_ins_mtmsr(inst, inst_rt);
408 break; 433 break;
409 } 434 }
410 435