Diffstat (limited to 'arch'):
 arch/powerpc/kvm/book3s_rmhandlers.S | 119 ++++++++++++++++++++++++---------
 1 file changed, 88 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index d89e315615bc..284f0a03891f 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -22,7 +22,10 @@
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
+
+#ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/exception-64s.h>
+#endif
 
 /*****************************************************************************
  *                                                                           *
@@ -30,6 +33,39 @@
  *                                                                           *
  ****************************************************************************/
 
+#if defined(CONFIG_PPC_BOOK3S_64)
+
+#define LOAD_SHADOW_VCPU(reg)				\
+	mfspr	reg, SPRN_SPRG_PACA
+
+#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
+#define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
+#define FUNC(name)		GLUE(.,name)
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define LOAD_SHADOW_VCPU(reg)						\
+	mfspr	reg, SPRN_SPRG_THREAD;					\
+	lwz	reg, THREAD_KVM_SVCPU(reg);				\
+	/* PPC32 can have a NULL pointer - let's check for that */	\
+	mtspr	SPRN_SPRG_SCRATCH1, r12;	/* Save r12 */		\
+	mfcr	r12;							\
+	cmpwi	reg, 0;							\
+	bne	1f;							\
+	mfspr	reg, SPRN_SPRG_SCRATCH0;				\
+	mtcr	r12;							\
+	mfspr	r12, SPRN_SPRG_SCRATCH1;				\
+	b	kvmppc_resume_\intno;					\
+1:;									\
+	mtcr	r12;							\
+	mfspr	r12, SPRN_SPRG_SCRATCH1;				\
+	tophys(reg, reg)
+
+#define SHADOW_VCPU_OFF		0
+#define MSR_NOIRQ		MSR_KERNEL
+#define FUNC(name)		name
+
+#endif
 
 .macro INTERRUPT_TRAMPOLINE intno
 
@@ -42,19 +78,19 @@ kvmppc_trampoline_\intno:
 	 * First thing to do is to find out if we're coming
 	 * from a KVM guest or a Linux process.
 	 *
-	 * To distinguish, we check a magic byte in the PACA
+	 * To distinguish, we check a magic byte in the PACA/current
 	 */
-	mfspr	r13, SPRN_SPRG_PACA		/* r13 = PACA */
-	std	r12, PACA_KVM_SCRATCH0(r13)
+	LOAD_SHADOW_VCPU(r13)
+	PPC_STL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
 	mfcr	r12
-	stw	r12, PACA_KVM_SCRATCH1(r13)
-	lbz	r12, PACA_KVM_IN_GUEST(r13)
+	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+	lbz	r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
 	cmpwi	r12, KVM_GUEST_MODE_NONE
 	bne	..kvmppc_handler_hasmagic_\intno
 	/* No KVM guest? Then jump back to the Linux handler! */
-	lwz	r12, PACA_KVM_SCRATCH1(r13)
+	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
 	mtcr	r12
-	ld	r12, PACA_KVM_SCRATCH0(r13)
+	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
 	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
 	b	kvmppc_resume_\intno		/* Get back original handler */
 
@@ -76,9 +112,7 @@ kvmppc_trampoline_\intno:
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSTEM_RESET
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_SEGMENT
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_SEGMENT
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
@@ -88,7 +122,14 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSCALL
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC
+
+/* Those are only available on 64 bit machines */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_SEGMENT
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_SEGMENT
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
+#endif
 
 /*
  * Bring us back to the faulting code, but skip the
@@ -99,11 +140,11 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
  *
  * Input Registers:
  *
  * R12            = free
- * R13            = PACA
- * PACA.KVM.SCRATCH0 = guest R12
- * PACA.KVM.SCRATCH1 = guest CR
+ * R13            = Shadow VCPU (PACA)
+ * SVCPU.SCRATCH0 = guest R12
+ * SVCPU.SCRATCH1 = guest CR
  * SPRG_SCRATCH0  = guest R13
  *
  */
 kvmppc_handler_skip_ins:
@@ -114,9 +155,9 @@ kvmppc_handler_skip_ins:
 	mtsrr0	r12
 
 	/* Clean up all state */
-	lwz	r12, PACA_KVM_SCRATCH1(r13)
+	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
 	mtcr	r12
-	ld	r12, PACA_KVM_SCRATCH0(r13)
+	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
 	mfspr	r13, SPRN_SPRG_SCRATCH0
 
 	/* And get back into the code */
@@ -147,32 +188,48 @@ kvmppc_handler_lowmem_trampoline_end:
  *
  * R3 = function
  * R4 = MSR
- * R5 = CTR
+ * R5 = scratch register
  *
  */
 _GLOBAL(kvmppc_rmcall)
-	mtmsr	r4		/* Disable relocation, so mtsrr
+	LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
+	mtmsr	r5		/* Disable relocation and interrupts, so mtsrr
 				   doesn't get interrupted */
-	mtctr	r5
+	sync
 	mtsrr0	r3
 	mtsrr1	r4
 	RFI
 
+#if defined(CONFIG_PPC_BOOK3S_32)
+#define STACK_LR	INT_FRAME_SIZE+4
+#elif defined(CONFIG_PPC_BOOK3S_64)
+#define STACK_LR	_LINK
+#endif
+
 /*
  * Activate current's external feature (FPU/Altivec/VSX)
  */
 #define define_load_up(what) 					\
 								\
 _GLOBAL(kvmppc_load_up_ ## what);				\
-	stdu	r1, -INT_FRAME_SIZE(r1);			\
+	PPC_STLU r1, -INT_FRAME_SIZE(r1);			\
 	mflr	r3;						\
-	std	r3, _LINK(r1);					\
-								\
-	bl	.load_up_ ## what;				\
-								\
-	ld	r3, _LINK(r1);					\
-	mtlr	r3;						\
-	addi	r1, r1, INT_FRAME_SIZE;				\
+	PPC_STL	r3, STACK_LR(r1);				\
+	PPC_STL	r20, _NIP(r1);					\
+	mfmsr	r20;						\
+	LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);			\
+	andc	r3,r20,r3;		/* Disable DR,EE */	\
+	mtmsr	r3;						\
+	sync;							\
+								\
+	bl	FUNC(load_up_ ## what);				\
+								\
+	mtmsr	r20;			/* Enable DR,EE */	\
+	sync;							\
+	PPC_LL	r3, STACK_LR(r1);				\
+	PPC_LL	r20, _NIP(r1);					\
+	mtlr	r3;						\
+	addi	r1, r1, INT_FRAME_SIZE;				\
 	blr
 
 define_load_up(fpu)