author     Alexander Graf <agraf@suse.de>    2012-02-15 18:06:24 -0500
committer  Avi Kivity <avi@redhat.com>       2012-04-08 05:54:54 -0400
commit     a2723ce7fe4b99bc2df492067c3f81de2ee89aab
tree       9481a79b48e07e4d2eab09a0aaef0e659f017ddb /arch/powerpc/kvm
parent     79300f8cb9be201f916d075b3ef2e032d83a0d75
KVM: PPC: e500mc: Move r1/r2 restoration very early
If we hit any exception whatsoever in the restore path and r1/r2 aren't the
host registers, we don't get a working oops. So it's always a good idea to
restore them as early as possible.
This time there is also a practical reason to do so: we need the host page fault
handler to fix up our guest instruction read code, and for that to work we need
r1/r2 restored.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
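
The reasoning above hinges on the exception-fixup machinery: a faulting access is
recoverable only if the state the page fault handler relies on (here, the host
values of r1 and r2) is already in place when the fault hits. The userspace sketch
below is only an analogy for that ordering constraint, not kernel code; the names
(guest_insn_read, fixup_ctx, ctx_valid) are invented, and sigsetjmp/siglongjmp
stands in for the host registers and the fixup entry.

```c
#define _POSIX_C_SOURCE 200809L
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static sigjmp_buf fixup_ctx;             /* stand-in for host r1/r2 + fixup entry */
static volatile sig_atomic_t ctx_valid;  /* "has host state been restored yet?" */

static void fault_handler(int sig)
{
        (void)sig;
        if (!ctx_valid)
                _exit(1);                /* nothing sane to unwind to: no usable "oops" */
        siglongjmp(fixup_ctx, 1);        /* fixup path: resume after the faulting access */
}

/* Read a word that may fault, the way the exit path reads a guest instruction. */
static int guest_insn_read(const uint32_t *ea, uint32_t *insn)
{
        volatile int ret = -1;

        if (sigsetjmp(fixup_ctx, 1) == 0) {
                ctx_valid = 1;           /* "restore r1/r2 first": recovery is now possible */
                *insn = *ea;             /* the access that may fault */
                ret = 0;
        }
        ctx_valid = 0;
        return ret;
}

int main(void)
{
        struct sigaction sa = { 0 };
        uint32_t insn = 0;

        sa.sa_handler = fault_handler;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        if (guest_insn_read((const uint32_t *)16, &insn) != 0)
                printf("faulted, recovered through the prepared context\n");
        return 0;
}
```

The shape mirrors the patch: prepare the recovery context first, and only then
perform the access that might fault.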
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 9eaeebd86e44..63023ae14da4 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -67,6 +67,12 @@
  * saved in vcpu: cr, ctr, r3-r13
  */
 .macro kvm_handler_common intno, srr0, flags
+        /* Restore host stack pointer */
+        PPC_STL r1, VCPU_GPR(r1)(r4)
+        PPC_STL r2, VCPU_GPR(r2)(r4)
+        PPC_LL  r1, VCPU_HOST_STACK(r4)
+        PPC_LL  r2, HOST_R2(r1)
+
         mfspr   r10, SPRN_PID
         lwz     r8, VCPU_HOST_PID(r4)
         PPC_LL  r11, VCPU_SHARED(r4)
@@ -290,10 +296,8 @@ _GLOBAL(kvmppc_resume_host)
         /* Save remaining volatile guest register state to vcpu. */
         mfspr   r3, SPRN_VRSAVE
         PPC_STL r0, VCPU_GPR(r0)(r4)
-        PPC_STL r1, VCPU_GPR(r1)(r4)
         mflr    r5
         mfspr   r6, SPRN_SPRG4
-        PPC_STL r2, VCPU_GPR(r2)(r4)
         PPC_STL r5, VCPU_LR(r4)
         mfspr   r7, SPRN_SPRG5
         PPC_STL r3, VCPU_VRSAVE(r4)
@@ -334,10 +338,6 @@ _GLOBAL(kvmppc_resume_host)
         mtspr   SPRN_EPCR, r3
         isync

-        /* Restore host stack pointer */
-        PPC_LL  r1, VCPU_HOST_STACK(r4)
-        PPC_LL  r2, HOST_R2(r1)
-
         /* Switch to kernel stack and jump to handler. */
         PPC_LL  r3, HOST_RUN(r1)
         mr      r5, r14 /* intno */