author	Alexander Graf <agraf@suse.de>	2010-07-29 08:48:05 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:50:56 -0400
commit	7810927760a0d16d7a41be4dab895fbbf9445bc0 (patch)
tree	f215dd900d26d691c05f9b48233b7ef910b4fb1d /arch/powerpc/kernel
parent	819a63dc792b0888edd3eda306a9e1e049dcbb1c (diff)
KVM: PPC: PV mtmsrd L=0 and mtmsr
There is also a form of mtmsr where all bits need to be addressed. While the
PPC64 Linux kernel behaves reasonably well here, on PPC32 we do not have an
L=1 form. It issues mtmsr even for simple things like only changing EE.
So we need to hook into that one as well and check for a mask of bits that we
deem safe to change from within guest context.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
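
The core idea is compact enough to paraphrase in C. A minimal sketch, assuming
the MSR bit values from arch/powerpc/include/asm/reg.h; critical_bits_changed()
is a hypothetical helper for illustration, not kernel code:

/*
 * Sketch only: mirrors the check kvm_emulate_mtmsr performs below.
 * MSR bit values are taken from arch/powerpc/include/asm/reg.h;
 * critical_bits_changed() is a hypothetical helper, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_EE 0x8000UL   /* external interrupt enable */
#define MSR_CE 0x20000UL  /* critical interrupt enable (BookE) */
#define MSR_ME 0x1000UL   /* machine check enable */
#define MSR_RI 0x2UL      /* recoverable interrupt */

#define MSR_SAFE_BITS     (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS (~MSR_SAFE_BITS)

/* Does writing new_msr require a real, trapping mtmsr? */
static bool critical_bits_changed(uint64_t old_msr, uint64_t new_msr)
{
	/* XOR yields the changed bits; the mask keeps the unsafe ones. */
	return ((old_msr ^ new_msr) & MSR_CRITICAL_BITS) != 0;
}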
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/kvm.c	51
-rw-r--r--	arch/powerpc/kernel/kvm_emul.S	84
2 files changed, 135 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 717ab0dded25..8ac57e2c52fa 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -63,7 +63,9 @@
 #define KVM_INST_MTSPR_DSISR	0x7c1203a6
 
 #define KVM_INST_TLBSYNC	0x7c00046c
+#define KVM_INST_MTMSRD_L0	0x7c000164
 #define KVM_INST_MTMSRD_L1	0x7c010164
+#define KVM_INST_MTMSR		0x7c000124
 
 static bool kvm_patching_worked = true;
 static char kvm_tmp[1024 * 1024];
@@ -176,6 +178,49 @@ static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
 	kvm_patch_ins_b(inst, distance_start);
 }
 
+extern u32 kvm_emulate_mtmsr_branch_offs;
+extern u32 kvm_emulate_mtmsr_reg1_offs;
+extern u32 kvm_emulate_mtmsr_reg2_offs;
+extern u32 kvm_emulate_mtmsr_reg3_offs;
+extern u32 kvm_emulate_mtmsr_orig_ins_offs;
+extern u32 kvm_emulate_mtmsr_len;
+extern u32 kvm_emulate_mtmsr[];
+
+static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
+{
+	u32 *p;
+	int distance_start;
+	int distance_end;
+	ulong next_inst;
+
+	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
+	if (!p)
+		return;
+
+	/* Find out where we are and put everything there */
+	distance_start = (ulong)p - (ulong)inst;
+	next_inst = ((ulong)inst + 4);
+	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
+
+	/* Make sure we only write valid b instructions */
+	if (distance_start > KVM_INST_B_MAX) {
+		kvm_patching_worked = false;
+		return;
+	}
+
+	/* Modify the chunk to fit the invocation */
+	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
+	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
+	p[kvm_emulate_mtmsr_reg1_offs] |= rt;
+	p[kvm_emulate_mtmsr_reg2_offs] |= rt;
+	p[kvm_emulate_mtmsr_reg3_offs] |= rt;
+	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
+	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
+
+	/* Patch the invocation */
+	kvm_patch_ins_b(inst, distance_start);
+}
+
 static void kvm_map_magic_page(void *data)
 {
 	kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
@@ -256,6 +301,12 @@ static void kvm_check_ins(u32 *inst)
 		if (get_rt(inst_rt) < 30)
 			kvm_patch_ins_mtmsrd(inst, inst_rt);
 		break;
+	case KVM_INST_MTMSR:
+	case KVM_INST_MTMSRD_L0:
+		/* We use r30 and r31 during the hook */
+		if (get_rt(inst_rt) < 30)
+			kvm_patch_ins_mtmsr(inst, inst_rt);
+		break;
 	}
 
 	switch (_inst) {
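
A note on the register patching above: the template instructions in kvm_emul.S
below refer to r0, so the rT field of the marked instruction words is zero and
the original register number can be OR-ed straight in. A minimal sketch of that
encoding, assuming the standard PPC layout with rT in bits 21-25 counting from
the least significant bit; mtmsr_with_rt() is a hypothetical helper, not a
kernel function:

/*
 * Sketch only: shows why "p[kvm_emulate_mtmsr_reg1_offs] |= rt" above
 * is sufficient.  The templates operate on r0, so the rT field of
 * those instruction words is zero before patching.
 */
#include <stdint.h>

#define KVM_MASK_RT    0x03e00000  /* rT field: bits 21-25 from the LSB */
#define KVM_INST_MTMSR 0x7c000124  /* encoding of "mtmsr r0" */

/* Extract the rT register number of an instruction word. */
static inline uint32_t get_rt(uint32_t inst)
{
	return (inst >> 21) & 0x1f;
}

/* Re-encode "mtmsr rN" using the rT field of the original instruction. */
static inline uint32_t mtmsr_with_rt(uint32_t orig_inst)
{
	return KVM_INST_MTMSR | (orig_inst & KVM_MASK_RT);
}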
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index 10dc4a6632fd..8cd22f47dd01 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -120,3 +120,87 @@ kvm_emulate_mtmsrd_reg_offs:
 .global kvm_emulate_mtmsrd_len
 kvm_emulate_mtmsrd_len:
 	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
+
+
+#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
+#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
+
+.global kvm_emulate_mtmsr
+kvm_emulate_mtmsr:
+
+	SCRATCH_SAVE
+
+	/* Fetch old MSR in r31 */
+	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	/* Find the changed bits between old and new MSR */
+kvm_emulate_mtmsr_reg1:
+	xor	r31, r0, r31
+
+	/* Check if we need to really do mtmsr */
+	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
+	and.	r31, r31, r30
+
+	/* No critical bits changed? Maybe we can stay in the guest. */
+	beq	maybe_stay_in_guest
+
+do_mtmsr:
+
+	SCRATCH_RESTORE
+
+	/* Just fire off the mtmsr if it's critical */
+kvm_emulate_mtmsr_orig_ins:
+	mtmsr	r0
+
+	b	kvm_emulate_mtmsr_branch
+
+maybe_stay_in_guest:
+
+	/* Check if we have to fetch an interrupt */
+	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
+	cmpwi	r31, 0
+	beq+	no_mtmsr
+
+	/* Check if we may trigger an interrupt */
+kvm_emulate_mtmsr_reg2:
+	andi.	r31, r0, MSR_EE
+	beq	no_mtmsr
+
+	b	do_mtmsr
+
+no_mtmsr:
+
+	/* Put MSR into magic page because we don't call mtmsr */
+kvm_emulate_mtmsr_reg3:
+	STL64(r0, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	SCRATCH_RESTORE
+
+	/* Go back to caller */
+kvm_emulate_mtmsr_branch:
+	b	.
+kvm_emulate_mtmsr_end:
+
+.global kvm_emulate_mtmsr_branch_offs
+kvm_emulate_mtmsr_branch_offs:
+	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_reg1_offs
+kvm_emulate_mtmsr_reg1_offs:
+	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_reg2_offs
+kvm_emulate_mtmsr_reg2_offs:
+	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_reg3_offs
+kvm_emulate_mtmsr_reg3_offs:
+	.long (kvm_emulate_mtmsr_reg3 - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_orig_ins_offs
+kvm_emulate_mtmsr_orig_ins_offs:
+	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4
+
+.global kvm_emulate_mtmsr_len
+kvm_emulate_mtmsr_len:
+	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
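
Read as a whole, the stub above makes a three-way decision. A hedged C
paraphrase of that flow, where magic_msr, magic_int and do_real_mtmsr() are
illustrative stand-ins for the magic-page fields and the original trapping
instruction, not kernel identifiers:

/*
 * Sketch only: C paraphrase of the kvm_emulate_mtmsr flow above.
 */
#include <stdint.h>

#define MSR_EE 0x8000UL
#define MSR_CE 0x20000UL
#define MSR_ME 0x1000UL
#define MSR_RI 0x2UL
#define MSR_CRITICAL_BITS (~(MSR_EE | MSR_CE | MSR_ME | MSR_RI))

extern uint64_t magic_msr;              /* shared MSR copy in the magic page */
extern uint32_t magic_int;              /* pending-interrupt flag */
extern void do_real_mtmsr(uint64_t v);  /* the original, trapping instruction */

void emulate_mtmsr(uint64_t new_msr)
{
	/* Critical bit changed?  Fall back to the real mtmsr. */
	if ((magic_msr ^ new_msr) & MSR_CRITICAL_BITS) {
		do_real_mtmsr(new_msr);
		return;
	}
	/* A pending interrupt that EE would now enable also forces a trap. */
	if (magic_int && (new_msr & MSR_EE)) {
		do_real_mtmsr(new_msr);
		return;
	}
	/* Otherwise just record the new MSR in the magic page. */
	magic_msr = new_msr;
}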