author     Paul Mackerras <paulus@samba.org>   2011-06-28 20:20:58 -0400
committer  Avi Kivity <avi@redhat.com>         2011-07-12 06:16:53 -0400
commit     3c42bf8a717cb636e0ed2ed77194669e2ac3ed56 (patch)
tree       4f543088e6a64ce7f1a771c1618668ff27752ecc /arch/powerpc
parent     923c53caea446d246949c94703be83e68f251af7 (diff)
KVM: PPC: Split host-state fields out of kvmppc_book3s_shadow_vcpu
There are several fields in struct kvmppc_book3s_shadow_vcpu that temporarily store bits of host state while a guest is running, rather than anything relating to the particular guest or vcpu. This splits them out into a new kvmppc_host_state structure and modifies the definitions in asm-offsets.c to suit.

On 32-bit, we have a kvmppc_host_state structure inside the kvmppc_book3s_shadow_vcpu since the assembly code needs to be able to get to them both with one pointer. On 64-bit they are separate fields in the PACA. This means that on 64-bit we don't need to copy the kvmppc_host_state in and out on vcpu load/unload, and in future will mean that the book3s_hv code doesn't need a shadow_vcpu struct in the PACA at all. That does mean that we have to be careful not to rely on any values persisting in the hstate field of the paca across any point where we could block or get preempted.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
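A minimal sketch of the resulting layout (mine, not part of the patch; all names are taken from the diff below) may help. On both subarchitectures the low-level code reaches the host-save area through the one pointer it has in r13:

    /* Host state saved across guest entry/exit, plus exit scratch slots. */
    struct kvmppc_host_state {
            ulong host_r1;   /* host stack pointer, reloaded on guest exit */
            ulong host_r2;   /* host R2 (TOC pointer on 64-bit) */
            ulong vmhandler; /* highmem exit handler address */
            ulong scratch0;  /* scratch slots used by the exit code */
            ulong scratch1;
            u8 in_guest;     /* KVM_GUEST_MODE_* flag tested by __KVMTEST */
    };

    /* asm-offsets.c emits each HSTATE_* constant relative to whatever
     * r13 points at:
     *   64-bit: offsetof(struct paca_struct, kvm_hstate.field), r13 = PACA
     *   32-bit: offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.field),
     *           r13 = shadow vcpu
     * so "lbz r10,HSTATE_IN_GUEST(r13)" works unchanged on both.
     */

That is also why the 32-bit variant keeps the kvmppc_host_state embedded in kvmppc_book3s_shadow_vcpu: the assembly must reach guest and host state through a single base register.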
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h   | 10
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  | 27
-rw-r--r--  arch/powerpc/include/asm/paca.h            |  1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          | 94
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S       |  2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S       | 19
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S       | 18
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S          | 76
8 files changed, 127 insertions(+), 120 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index b6a3a443fbde..296c9b66c04a 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -96,16 +96,16 @@
         EXCEPTION_PROLOG_PSERIES_1(label, h);
 
 #define __KVMTEST(n) \
-        lbz r10,PACA_KVM_SVCPU+SVCPU_IN_GUEST(r13); \
+        lbz r10,HSTATE_IN_GUEST(r13); \
         cmpwi r10,0; \
         bne do_kvm_##n
 
 #define __KVM_HANDLER(area, h, n) \
 do_kvm_##n: \
         ld r10,area+EX_R10(r13); \
-        stw r9,PACA_KVM_SVCPU+SVCPU_SCRATCH1(r13); \
+        stw r9,HSTATE_SCRATCH1(r13); \
         ld r9,area+EX_R9(r13); \
-        std r12,PACA_KVM_SVCPU+SVCPU_SCRATCH0(r13); \
+        std r12,HSTATE_SCRATCH0(r13); \
         li r12,n; \
         b kvmppc_interrupt
 
@@ -114,9 +114,9 @@ do_kvm_##n: \
         cmpwi r10,KVM_GUEST_MODE_SKIP; \
         ld r10,area+EX_R10(r13); \
         beq 89f; \
-        stw r9,PACA_KVM_SVCPU+SVCPU_SCRATCH1(r13); \
+        stw r9,HSTATE_SCRATCH1(r13); \
         ld r9,area+EX_R9(r13); \
-        std r12,PACA_KVM_SVCPU+SVCPU_SCRATCH0(r13); \
+        std r12,HSTATE_SCRATCH0(r13); \
         li r12,n; \
         b kvmppc_interrupt; \
 89:     mtocrf 0x80,r9; \
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d5a8a3861635..312617529864 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -60,6 +60,22 @@ kvmppc_resume_\intno:
 
 #else /*__ASSEMBLY__ */
 
+/*
+ * This struct goes in the PACA on 64-bit processors. It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu. It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+        ulong host_r1;
+        ulong host_r2;
+        ulong vmhandler;
+        ulong scratch0;
+        ulong scratch1;
+        u8 in_guest;
+};
+
 struct kvmppc_book3s_shadow_vcpu {
         ulong gpr[14];
         u32 cr;
@@ -73,17 +89,12 @@ struct kvmppc_book3s_shadow_vcpu {
         ulong shadow_srr1;
         ulong fault_dar;
 
-        ulong host_r1;
-        ulong host_r2;
-        ulong handler;
-        ulong scratch0;
-        ulong scratch1;
-        ulong vmhandler;
-        u8 in_guest;
-
 #ifdef CONFIG_PPC_BOOK3S_32
         u32 sr[16]; /* Guest SRs */
+
+        struct kvmppc_host_state hstate;
 #endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
         u8 slb_max; /* highest used guest slb entry */
         struct {
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 74126765106a..58f4a18ef60c 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -149,6 +149,7 @@ struct paca_struct {
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
         /* We use this to store guest state in */
         struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+        struct kvmppc_host_state kvm_hstate;
 #endif
 };
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index faf846131f45..dabfb7346f36 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -198,11 +198,6 @@ int main(void)
         DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
         DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
         DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-        DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
-        DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
-        DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
 #endif /* CONFIG_PPC64 */
 
         /* RTAS */
@@ -416,49 +411,54 @@ int main(void)
         DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
         DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
         DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
-        DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
-                           offsetof(struct kvmppc_vcpu_book3s, vcpu));
-        DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
-        DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
-        DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
-        DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
-        DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
-        DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
-        DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
-        DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
-        DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
-        DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
-        DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
-        DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
-        DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
-        DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
-        DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
-        DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
-        DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
-        DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
-        DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
-        DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
-        DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
-        DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                         vmhandler));
-        DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                        scratch0));
-        DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                        scratch1));
-        DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                        in_guest));
-        DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                           fault_dsisr));
-        DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                         fault_dar));
-        DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                         last_inst));
-        DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
-                                           shadow_srr1));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else /* 32-bit */
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
+#endif
+
+        SVCPU_FIELD(SVCPU_CR, cr);
+        SVCPU_FIELD(SVCPU_XER, xer);
+        SVCPU_FIELD(SVCPU_CTR, ctr);
+        SVCPU_FIELD(SVCPU_LR, lr);
+        SVCPU_FIELD(SVCPU_PC, pc);
+        SVCPU_FIELD(SVCPU_R0, gpr[0]);
+        SVCPU_FIELD(SVCPU_R1, gpr[1]);
+        SVCPU_FIELD(SVCPU_R2, gpr[2]);
+        SVCPU_FIELD(SVCPU_R3, gpr[3]);
+        SVCPU_FIELD(SVCPU_R4, gpr[4]);
+        SVCPU_FIELD(SVCPU_R5, gpr[5]);
+        SVCPU_FIELD(SVCPU_R6, gpr[6]);
+        SVCPU_FIELD(SVCPU_R7, gpr[7]);
+        SVCPU_FIELD(SVCPU_R8, gpr[8]);
+        SVCPU_FIELD(SVCPU_R9, gpr[9]);
+        SVCPU_FIELD(SVCPU_R10, gpr[10]);
+        SVCPU_FIELD(SVCPU_R11, gpr[11]);
+        SVCPU_FIELD(SVCPU_R12, gpr[12]);
+        SVCPU_FIELD(SVCPU_R13, gpr[13]);
+        SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+        SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+        SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+        SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
 #ifdef CONFIG_PPC_BOOK3S_32
-        DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+        SVCPU_FIELD(SVCPU_SR, sr);
 #endif
-#else
+#ifdef CONFIG_PPC64
+        SVCPU_FIELD(SVCPU_SLB, slb);
+        SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+        HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+        HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+        HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+        HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+        HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+        HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#else /* CONFIG_PPC_BOOK3S */
         DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
         DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
         DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -468,7 +468,7 @@ int main(void)
         DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
         DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
 
 #ifdef CONFIG_KVM_GUEST
         DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e76472cbf3b5..6da00550afea 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -298,7 +298,7 @@ data_access_check_stab:
         srdi r10,r10,60
         rlwimi r10,r9,16,0x20
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-        lbz r9,PACA_KVM_SVCPU+SVCPU_IN_GUEST(r13)
+        lbz r9,HSTATE_IN_GUEST(r13)
         rlwimi r10,r9,8,0x300
 #endif
         mfcr r9
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 8c5e0e160107..c54b0e30cf3f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,8 +29,7 @@
 #define ULONG_SIZE 8
 #define FUNC(name) GLUE(.,name)
 
-#define GET_SHADOW_VCPU(reg) \
-        addi reg, r13, PACA_KVM_SVCPU
+#define GET_SHADOW_VCPU_R13
 
 #define DISABLE_INTERRUPTS \
         mfmsr r0; \
@@ -43,8 +42,8 @@
 #define ULONG_SIZE 4
 #define FUNC(name) name
 
-#define GET_SHADOW_VCPU(reg) \
-        lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+#define GET_SHADOW_VCPU_R13 \
+        lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
 
 #define DISABLE_INTERRUPTS \
         mfmsr r0; \
@@ -107,17 +106,11 @@ kvm_start_entry:
         /* Load non-volatile guest state from the vcpu */
         VCPU_LOAD_NVGPRS(r4)
 
-        GET_SHADOW_VCPU(r5)
-
-        /* Save R1/R2 in the PACA */
-        PPC_STL r1, SVCPU_HOST_R1(r5)
-        PPC_STL r2, SVCPU_HOST_R2(r5)
+kvm_start_lightweight:
 
-        /* XXX swap in/out on load? */
+        GET_SHADOW_VCPU_R13
         PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
-        PPC_STL r3, SVCPU_VMHANDLER(r5)
-
-kvm_start_lightweight:
+        PPC_STL r3, HSTATE_VMHANDLER(r13)
 
         PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
 
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index dd03689fc609..c1f877c4a884 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,6 @@
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
-#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
 #define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
 #define FUNC(name) GLUE(.,name)
 
@@ -66,7 +65,6 @@ kvmppc_skip_Hinterrupt:
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
-#define SHADOW_VCPU_OFF 0
 #define MSR_NOIRQ MSR_KERNEL
 #define FUNC(name) name
 
@@ -96,14 +94,14 @@ kvmppc_trampoline_\intno:
         b kvmppc_resume_\intno /* Get back original handler */
 
 1:      tophys(r13, r13)
-        stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+        stw r12, HSTATE_SCRATCH1(r13)
         mfspr r12, SPRN_SPRG_SCRATCH1
-        stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
-        lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+        stw r12, HSTATE_SCRATCH0(r13)
+        lbz r12, HSTATE_IN_GUEST(r13)
         cmpwi r12, KVM_GUEST_MODE_NONE
         bne ..kvmppc_handler_hasmagic_\intno
         /* No KVM guest? Then jump back to the Linux handler! */
-        lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+        lwz r12, HSTATE_SCRATCH1(r13)
         b 2b
 
         /* Now we know we're handling a KVM guest */
@@ -146,8 +144,8 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
  *
  * R12 = free
  * R13 = Shadow VCPU (PACA)
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
  * SPRG_SCRATCH0 = guest R13
  *
  */
@@ -159,9 +157,9 @@ kvmppc_handler_skip_ins:
         mtsrr0 r12
 
         /* Clean up all state */
-        lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+        lwz r12, HSTATE_SCRATCH1(r13)
         mtcr r12
-        PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+        PPC_LL r12, HSTATE_SCRATCH0(r13)
         GET_SCRATCH0(r13)
 
         /* And get back into the code */
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 4a623eb28a53..1cc25e8c0cf1 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -22,7 +22,7 @@
 #if defined(CONFIG_PPC_BOOK3S_64)
 
 #define GET_SHADOW_VCPU(reg) \
-        addi reg, r13, PACA_KVM_SVCPU
+        mr reg, r13
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -71,6 +71,10 @@ kvmppc_handler_trampoline_enter:
         /* r3 = shadow vcpu */
         GET_SHADOW_VCPU(r3)
 
+        /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
+        PPC_STL r1, HSTATE_HOST_R1(r3)
+        PPC_STL r2, HSTATE_HOST_R2(r3)
+
         /* Move SRR0 and SRR1 into the respective regs */
         PPC_LL r9, SVCPU_PC(r3)
         mtsrr0 r9
@@ -78,7 +82,7 @@ kvmppc_handler_trampoline_enter:
 
         /* Activate guest mode, so faults get handled by KVM */
         li r11, KVM_GUEST_MODE_GUEST
-        stb r11, SVCPU_IN_GUEST(r3)
+        stb r11, HSTATE_IN_GUEST(r3)
 
         /* Switch to guest segment. This is subarch specific. */
         LOAD_GUEST_SEGMENTS
@@ -132,30 +136,30 @@ kvmppc_interrupt:
  *
  * SPRG_SCRATCH0 = guest R13
  * R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
  *
  */
 
         /* Save registers */
 
-        PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
-        PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
-        PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
-        PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
-        PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
-        PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
-        PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
-        PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
-        PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
-        PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
-        PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
-        PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+        PPC_STL r0, SVCPU_R0(r13)
+        PPC_STL r1, SVCPU_R1(r13)
+        PPC_STL r2, SVCPU_R2(r13)
+        PPC_STL r3, SVCPU_R3(r13)
+        PPC_STL r4, SVCPU_R4(r13)
+        PPC_STL r5, SVCPU_R5(r13)
+        PPC_STL r6, SVCPU_R6(r13)
+        PPC_STL r7, SVCPU_R7(r13)
+        PPC_STL r8, SVCPU_R8(r13)
+        PPC_STL r9, SVCPU_R9(r13)
+        PPC_STL r10, SVCPU_R10(r13)
+        PPC_STL r11, SVCPU_R11(r13)
 
         /* Restore R1/R2 so we can handle faults */
-        PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
-        PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+        PPC_LL r1, HSTATE_HOST_R1(r13)
+        PPC_LL r2, HSTATE_HOST_R2(r13)
 
         /* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
@@ -171,17 +175,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
 1:      mfsrr0 r3
         mfsrr1 r4
 2:
-        PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
-        PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+        PPC_STL r3, SVCPU_PC(r13)
+        PPC_STL r4, SVCPU_SHADOW_SRR1(r13)
 
         /* Get scratch'ed off registers */
         GET_SCRATCH0(r9)
-        PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
-        lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+        PPC_LL r8, HSTATE_SCRATCH0(r13)
+        lwz r7, HSTATE_SCRATCH1(r13)
 
-        PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
-        PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
-        stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+        PPC_STL r9, SVCPU_R13(r13)
+        PPC_STL r8, SVCPU_R12(r13)
+        stw r7, SVCPU_CR(r13)
 
         /* Save more register state */
 
@@ -191,11 +195,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
         mfctr r8
         mflr r9
 
-        stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
-        PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
-        stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
-        PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
-        PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+        stw r5, SVCPU_XER(r13)
+        PPC_STL r6, SVCPU_FAULT_DAR(r13)
+        stw r7, SVCPU_FAULT_DSISR(r13)
+        PPC_STL r8, SVCPU_CTR(r13)
+        PPC_STL r9, SVCPU_LR(r13)
 
         /*
          * In order for us to easily get the last instruction,
@@ -225,7 +229,7 @@ ld_last_inst:
         /* Set guest mode to 'jump over instruction' so if lwz faults
          * we'll just continue at the next IP. */
         li r9, KVM_GUEST_MODE_SKIP
-        stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+        stb r9, HSTATE_IN_GUEST(r13)
 
         /* 1) enable paging for data */
         mfmsr r9
@@ -239,13 +243,13 @@ ld_last_inst:
         sync
 
 #endif
-        stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+        stw r0, SVCPU_LAST_INST(r13)
 
 no_ld_last_inst:
 
         /* Unset guest mode */
         li r9, KVM_GUEST_MODE_NONE
-        stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+        stb r9, HSTATE_IN_GUEST(r13)
 
         /* Switch back to host MMU */
         LOAD_HOST_SEGMENTS
@@ -255,7 +259,7 @@ no_ld_last_inst:
  * R1 = host R1
  * R2 = host R2
  * R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
  * SVCPU.* = guest *
  *
  */
@@ -265,7 +269,7 @@ no_ld_last_inst:
         ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
         mtsrr1 r7
         /* Load highmem handler address */
-        PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+        PPC_LL r8, HSTATE_VMHANDLER(r13)
         mtsrr0 r8
 
         RFI