-rw-r--r--	arch/powerpc/kernel/kvm.c	39
-rw-r--r--	arch/powerpc/kernel/kvm_emul.S	17
2 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index c8bab24ff8ac..10b681c092ed 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -42,6 +42,7 @@
 #define KVM_INST_B_MAX		0x01ffffff
 
 #define KVM_MASK_RT		0x03e00000
+#define KVM_RT_30		0x03c00000
 #define KVM_MASK_RB		0x0000f800
 #define KVM_INST_MFMSR		0x7c0000a6
 #define KVM_INST_MFSPR_SPRG0	0x7c1042a6
@@ -82,6 +83,15 @@ static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
 	flush_icache_range((ulong)inst, (ulong)inst + 4);
 }
 
+static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
+{
+#ifdef CONFIG_64BIT
+	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
+#else
+	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
+#endif
+}
+
 static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
@@ -186,7 +196,6 @@ static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
 extern u32 kvm_emulate_mtmsr_branch_offs;
 extern u32 kvm_emulate_mtmsr_reg1_offs;
 extern u32 kvm_emulate_mtmsr_reg2_offs;
-extern u32 kvm_emulate_mtmsr_reg3_offs;
 extern u32 kvm_emulate_mtmsr_orig_ins_offs;
 extern u32 kvm_emulate_mtmsr_len;
 extern u32 kvm_emulate_mtmsr[];
@@ -216,9 +225,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
 	/* Modify the chunk to fit the invocation */
 	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
 	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
-	p[kvm_emulate_mtmsr_reg1_offs] |= rt;
-	p[kvm_emulate_mtmsr_reg2_offs] |= rt;
-	p[kvm_emulate_mtmsr_reg3_offs] |= rt;
+
+	/* Make clobbered registers work too */
+	switch (get_rt(rt)) {
+	case 30:
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
+				 magic_var(scratch2), KVM_RT_30);
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
+				 magic_var(scratch2), KVM_RT_30);
+		break;
+	case 31:
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
+				 magic_var(scratch1), KVM_RT_30);
+		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
+				 magic_var(scratch1), KVM_RT_30);
+		break;
+	default:
+		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
+		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
+		break;
+	}
+
 	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
 	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
 
@@ -402,9 +429,7 @@ static void kvm_check_ins(u32 *inst, u32 features)
 		break;
 	case KVM_INST_MTMSR:
 	case KVM_INST_MTMSRD_L0:
-		/* We use r30 and r31 during the hook */
-		if (get_rt(inst_rt) < 30)
-			kvm_patch_ins_mtmsr(inst, inst_rt);
+		kvm_patch_ins_mtmsr(inst, inst_rt);
 		break;
 	}
 
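Note on the kvm.c hunks above: KVM_RT_30 is simply GPR 30 pre-encoded into the RT field (bits 21-25) of a PowerPC instruction word, which is why kvm_patch_ins_ll() can OR it straight into the load it patches in. A minimal standalone sketch of that encoding follows; the rt_field()/rt_gpr() helpers are hypothetical and not part of the patch.

/*
 * Sketch only: shows the RT-field arithmetic behind KVM_MASK_RT and
 * KVM_RT_30. Helper names are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define KVM_MASK_RT	0x03e00000	/* RT field: bits 21-25 of the insn word */
#define KVM_RT_30	0x03c00000	/* RT field pre-encoded for r30 */

/* Hypothetical helper: encode a GPR number into the RT field. */
static uint32_t rt_field(uint32_t gpr)
{
	return (gpr << 21) & KVM_MASK_RT;
}

/* Hypothetical helper: recover the GPR number from a masked RT field. */
static uint32_t rt_gpr(uint32_t rt)
{
	return (rt & KVM_MASK_RT) >> 21;
}

int main(void)
{
	printf("rt_field(30)      = 0x%08x\n", rt_field(30));	/* 0x03c00000 */
	printf("rt_gpr(KVM_RT_30) = %u\n", rt_gpr(KVM_RT_30));	/* 30 */
	return 0;
}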
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index a6e97e7a55e0..65305325250b 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -135,7 +135,8 @@ kvm_emulate_mtmsr:
 
 	/* Find the changed bits between old and new MSR */
 kvm_emulate_mtmsr_reg1:
-	xor	r31, r0, r31
+	ori	r30, r0, 0
+	xor	r31, r30, r31
 
 	/* Check if we need to really do mtmsr */
 	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
@@ -156,14 +157,17 @@ kvm_emulate_mtmsr_orig_ins:
 
 maybe_stay_in_guest:
 
+	/* Get the target register in r30 */
+kvm_emulate_mtmsr_reg2:
+	ori	r30, r0, 0
+
 	/* Check if we have to fetch an interrupt */
 	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
 	cmpwi	r31, 0
 	beq+	no_mtmsr
 
 	/* Check if we may trigger an interrupt */
-kvm_emulate_mtmsr_reg2:
-	andi.	r31, r0, MSR_EE
+	andi.	r31, r30, MSR_EE
 	beq	no_mtmsr
 
 	b	do_mtmsr
@@ -171,8 +175,7 @@ kvm_emulate_mtmsr_reg2:
 no_mtmsr:
 
 	/* Put MSR into magic page because we don't call mtmsr */
-kvm_emulate_mtmsr_reg3:
-	STL64(r0, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
 
 	SCRATCH_RESTORE
 
@@ -193,10 +196,6 @@ kvm_emulate_mtmsr_reg1_offs:
 kvm_emulate_mtmsr_reg2_offs:
 	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4
 
-.global kvm_emulate_mtmsr_reg3_offs
-kvm_emulate_mtmsr_reg3_offs:
-	.long (kvm_emulate_mtmsr_reg3 - kvm_emulate_mtmsr) / 4
-
.global kvm_emulate_mtmsr_orig_ins_offs
 kvm_emulate_mtmsr_orig_ins_offs:
 	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4
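Note on the kvm_emul.S hunks above: after this patch the stub copies the guest's source register into r30 at the reg1/reg2 sites (which kvm_patch_ins_mtmsr() rewrites into magic-page loads when the source is r30 or r31), then either performs a real mtmsr or just publishes the new MSR in the magic page. A hedged C model of that decision logic follows; MSR_CRITICAL_BITS is a placeholder (the real mask is defined in kvm_emul.S), the magic-page struct is simplified, and do_real_mtmsr() is a hypothetical stand-in for the path that executes the original, trapping mtmsr.

/*
 * Sketch of the control flow in the patched kvm_emulate_mtmsr stub.
 * For orientation only; the authoritative code is the assembly above.
 */
#include <stdint.h>

#define MSR_EE			0x8000ULL	/* external-interrupt enable */
#define MSR_CRITICAL_BITS	(~MSR_EE)	/* placeholder value only */

struct magic_page {
	uint64_t msr;		/* shared MSR copy (KVM_MAGIC_MSR) */
	uint32_t pending_int;	/* pending-interrupt flag (KVM_MAGIC_INT) */
};

void do_real_mtmsr(uint64_t new_msr);	/* hypothetical: the trapping path */

void emulate_mtmsr(struct magic_page *mp, uint64_t new_msr)
{
	/* reg1 site + xor: find the bits that actually change */
	uint64_t changed = mp->msr ^ new_msr;

	/* Critical bits changed: fall back to a genuine mtmsr. */
	if (changed & MSR_CRITICAL_BITS) {
		do_real_mtmsr(new_msr);
		return;
	}

	/* Interrupt pending and the new MSR enables EE: also take the real path. */
	if (mp->pending_int && (new_msr & MSR_EE)) {
		do_real_mtmsr(new_msr);
		return;
	}

	/* Otherwise just store the new MSR into the magic page (no_mtmsr). */
	mp->msr = new_msr;
}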