path: root/arch/powerpc
author    Alexander Graf <agraf@suse.de>    2013-11-28 20:24:18 -0500
committer Alexander Graf <agraf@suse.de>    2013-12-09 03:41:26 -0500
commit    d825a04387ff4ce66117306f2862c7cedca5c597 (patch)
tree      75439bbacc1513300463151eaa09dbd79c82e7fc /arch/powerpc
parent    91648ec09c1ef69c4d840ab6dab391bfb452d554 (diff)
KVM: PPC: Book3S: PR: Don't clobber our exit handler id
We call a C helper to save all svcpu fields into our vcpu. The C ABI
states that r12 is considered volatile, but we currently keep our exit
handler id in r12. So we need to save it away somewhere that definitely
does get preserved across the C call; the patch stashes it in the vcpu
struct and restores it from there later.

This bug hasn't hit anyone yet, since gcc is smart enough to generate
code that doesn't even need r12, which means the value stayed intact
across the call by sheer luck. But we can't rely on that.

Signed-off-by: Alexander Graf <agraf@suse.de>
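For context, a minimal sketch of the hazard this patch closes, in the
style of book3s_interrupts.S. The helper name some_c_helper and the
offset macro EXAMPLE_SLOT are hypothetical stand-ins, purely for
illustration; the ABI fact assumed is that r3-r12 are volatile under
the PPC ELF ABI, so a C callee is free to clobber them:

	/* Hypothetical sketch, not kernel code: some_c_helper and
	 * EXAMPLE_SLOT stand in for real symbols.  Under the PPC ELF
	 * ABI, r3-r12 are volatile, so a C callee may clobber r12. */

	/* Broken pattern (what the code did before this patch): */
	bl	some_c_helper		/* C call may destroy r12        */
	mr	r5, r12			/* BUG: r12 may be stale here    */

	/* Fixed pattern (what this patch does): spill the id to
	 * memory that survives the call, reload it afterwards. */
	stw	r12, EXAMPLE_SLOT(r3)	/* save exit id before the call  */
	bl	some_c_helper
	lwz	r5, EXAMPLE_SLOT(r3)	/* reload exit id afterwards     */

Spilling to the vcpu struct rather than a non-volatile GPR avoids
having to preserve yet another register around the call.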
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 13 +++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..5e7cb32ce4dc 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -132,9 +132,17 @@ kvm_start_lightweight:
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
@@ -151,7 +159,6 @@ kvm_start_lightweight:
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +184,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)