diff options
author | Alexander Graf <agraf@suse.de> | 2012-02-15 18:24:28 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2012-04-08 05:54:56 -0400 |
commit | 1d628af78a28c77143bcdd4ed09e93bb235d4198 (patch) | |
tree | 68c62eaade6f5e183fc043a3b423bd7cf434b6de /arch | |
parent | a2723ce7fe4b99bc2df492067c3f81de2ee89aab (diff) |
KVM: PPC: e500mc: add load inst fixup
There's always a chance we're unable to read a guest instruction. The guest
could have its TLB mapped execute-only but not readable, or something odd may happen
and our TLB may get flushed. So it's a good idea to be prepared for that case
and have a fallback that allows us to fix things up when it occurs.
Add fixup code that keeps guest code from potentially crashing our host kernel.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/kvm/bookehv_interrupts.S | 30 |
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 63023ae14da4..e9e735057939 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/asm-compat.h> | 28 | #include <asm/asm-compat.h> |
29 | #include <asm/asm-offsets.h> | 29 | #include <asm/asm-offsets.h> |
30 | #include <asm/bitsperlong.h> | 30 | #include <asm/bitsperlong.h> |
31 | #include <asm/thread_info.h> | ||
31 | 32 | ||
32 | #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ | 33 | #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ |
33 | 34 | ||
@@ -171,9 +172,36 @@ | |||
171 | PPC_STL r30, VCPU_GPR(r30)(r4) | 172 | PPC_STL r30, VCPU_GPR(r30)(r4) |
172 | PPC_STL r31, VCPU_GPR(r31)(r4) | 173 | PPC_STL r31, VCPU_GPR(r31)(r4) |
173 | mtspr SPRN_EPLC, r8 | 174 | mtspr SPRN_EPLC, r8 |
175 | |||
176 | /* disable preemption, so we are sure we hit the fixup handler */ | ||
177 | #ifdef CONFIG_PPC64 | ||
178 | clrrdi r8,r1,THREAD_SHIFT | ||
179 | #else | ||
180 | rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */ | ||
181 | #endif | ||
182 | li r7, 1 | ||
183 | stw r7, TI_PREEMPT(r8) | ||
184 | |||
174 | isync | 185 | isync |
175 | lwepx r9, 0, r5 | 186 | |
187 | /* | ||
188 | * In case the read goes wrong, we catch it and write an invalid value | ||
189 | * in LAST_INST instead. | ||
190 | */ | ||
191 | 1: lwepx r9, 0, r5 | ||
192 | 2: | ||
193 | .section .fixup, "ax" | ||
194 | 3: li r9, KVM_INST_FETCH_FAILED | ||
195 | b 2b | ||
196 | .previous | ||
197 | .section __ex_table,"a" | ||
198 | PPC_LONG_ALIGN | ||
199 | PPC_LONG 1b,3b | ||
200 | .previous | ||
201 | |||
176 | mtspr SPRN_EPLC, r3 | 202 | mtspr SPRN_EPLC, r3 |
203 | li r7, 0 | ||
204 | stw r7, TI_PREEMPT(r8) | ||
177 | stw r9, VCPU_LAST_INST(r4) | 205 | stw r9, VCPU_LAST_INST(r4) |
178 | .endif | 206 | .endif |
179 | 207 | ||