Diffstat (limited to 'arch/powerpc/kvm/book3s_segment.S')
-rw-r--r--	arch/powerpc/kvm/book3s_segment.S	259
1 file changed, 259 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
new file mode 100644
index 000000000000..7c52ed0b7051
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -0,0 +1,259 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)				\
	addi	reg, r13, PACA_KVM_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif

/* Disable for nested KVM */
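/*
 * With USE_QUICK_LAST_INST defined, the exit path fetches the instruction
 * the guest exited on directly from the guest's PC address, with data
 * translation briefly re-enabled (see ld_last_inst below).  Without it,
 * SVCPU_LAST_INST is simply stored as KVM_INST_FETCH_FAILED.
 */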
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif
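Both trampolines below address the shadow vcpu exclusively through SVCPU_* offsets generated by asm-offsets. As a reading aid, here is a rough C sketch of the structure those offsets imply; field names, types, and ordering are inferred from the accesses in this file, not copied from the kernel headers:

	/* Inferred layout only -- a reading aid, not the kernel's definition. */
	struct shadow_vcpu_sketch {
		unsigned long gpr[14];      /* SVCPU_R0 .. SVCPU_R13 */
		unsigned int  cr;           /* SVCPU_CR (accessed with lwz/stw) */
		unsigned int  xer;          /* SVCPU_XER (accessed with lwz/stw) */
		unsigned long ctr;          /* SVCPU_CTR */
		unsigned long lr;           /* SVCPU_LR */
		unsigned long pc;           /* SVCPU_PC: guest PC, fed into SRR0 */
		unsigned long shadow_srr1;  /* SVCPU_SHADOW_SRR1: SRR1 captured on exit */
		unsigned long fault_dar;    /* SVCPU_FAULT_DAR */
		unsigned int  fault_dsisr;  /* SVCPU_FAULT_DSISR */
		unsigned int  last_inst;    /* SVCPU_LAST_INST */
		unsigned long host_r1;      /* SVCPU_HOST_R1: host stack pointer */
		unsigned long host_r2;      /* SVCPU_HOST_R2: host r2 */
		unsigned long vmhandler;    /* SVCPU_VMHANDLER: highmem exit handler */
		unsigned long scratch0;     /* SVCPU_SCRATCH0 */
		unsigned long scratch1;     /* SVCPU_SCRATCH1 */
		unsigned char in_guest;     /* SVCPU_IN_GUEST: KVM_GUEST_MODE_* */
	};

On 64-bit this state lives in the PACA (PACA_KVM_SVCPU), while on 32-bit it is reached through the thread struct (THREAD_KVM_SVCPU); that difference is exactly what GET_SHADOW_VCPU above encapsulates.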

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R10 = guest MSR
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Move SRR0 and SRR1 into the respective regs */
	PPC_LL	r9, SVCPU_PC(r3)
	mtsrr0	r9
	mtsrr1	r10

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, SVCPU_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

	/* Enter guest */

	PPC_LL	r4, (SVCPU_CTR)(r3)
	PPC_LL	r5, (SVCPU_LR)(r3)
	lwz	r6, (SVCPU_CR)(r3)
	lwz	r7, (SVCPU_XER)(r3)

	mtctr	r4
	mtlr	r5
	mtcr	r6
	mtxer	r7

	PPC_LL	r0, (SVCPU_R0)(r3)
	PPC_LL	r1, (SVCPU_R1)(r3)
	PPC_LL	r2, (SVCPU_R2)(r3)
	PPC_LL	r4, (SVCPU_R4)(r3)
	PPC_LL	r5, (SVCPU_R5)(r3)
	PPC_LL	r6, (SVCPU_R6)(r3)
	PPC_LL	r7, (SVCPU_R7)(r3)
	PPC_LL	r8, (SVCPU_R8)(r3)
	PPC_LL	r9, (SVCPU_R9)(r3)
	PPC_LL	r10, (SVCPU_R10)(r3)
	PPC_LL	r11, (SVCPU_R11)(r3)
	PPC_LL	r12, (SVCPU_R12)(r3)
	PPC_LL	r13, (SVCPU_R13)(r3)

	PPC_LL	r3, (SVCPU_R3)(r3)

	RFI
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0  = guest R13
	 * R12            = exit handler id
	 * R13            = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.SCRATCH0 = guest R12
	 * SVCPU.SCRATCH1 = guest CR
	 *
	 */

	/* Save registers */

	PPC_STL	r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
	PPC_STL	r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
	PPC_STL	r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
	PPC_STL	r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
	PPC_STL	r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
	PPC_STL	r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
	PPC_STL	r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
	PPC_STL	r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
	PPC_STL	r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
	PPC_STL	r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
	PPC_LL	r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)

	/* Save guest PC and MSR */
	mfsrr0	r3
	mfsrr1	r4

	PPC_STL	r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
	PPC_STL	r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)

	/* Get scratch'ed off registers */
	mfspr	r9, SPRN_SPRG_SCRATCH0
	PPC_LL	r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	lwz	r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)

	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
	stw	r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	stw	r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
	PPC_STL	r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
	stw	r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
	PPC_STL	r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
	PPC_STL	r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)

	/*
	 * In order to easily get the last instruction we took the #vmexit at,
	 * we exploit the fact that the virtual layout is still the same here,
	 * so we can just load from the guest's PC address.
	 */

	/* We only load the last instruction when it's safe */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst

	b	no_ld_last_inst

ld_last_inst:
	/* Save off the guest instruction we're at */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)

	/* 1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR			/* Enable paging for data */
	mtmsr	r11
	sync
	/* 2) fetch the instruction */
	lwz	r0, 0(r3)
	/* 3) disable paging again */
	mtmsr	r9
	sync

#endif
	stw	r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1      = host R1
	 * R2      = host R2
	 * R12     = exit handler id
	 * R13     = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.* = guest *
	 *
	 */

	/* RFI into the highmem handler */
	mfmsr	r7
	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME	/* Enable paging */
	mtsrr1	r7
	/* Load highmem handler address */
	PPC_LL	r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
	mtsrr0	r8

	RFI
kvmppc_handler_trampoline_exit_end:
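
The USE_QUICK_LAST_INST path above only works because a fault taken on that lwz does not derail the host: SVCPU_IN_GUEST is set to KVM_GUEST_MODE_SKIP first, and the host's exception entry code, on seeing that mode, resumes one instruction later, leaving r0 at KVM_INST_FETCH_FAILED. A minimal C sketch of that idea (not the kernel's actual handler; the mode values here are illustrative stand-ins) could look like this:

	#include <stdint.h>

	/* Illustrative mode values; the real ones live in the KVM headers. */
	#define KVM_GUEST_MODE_NONE	0
	#define KVM_GUEST_MODE_GUEST	1
	#define KVM_GUEST_MODE_SKIP	2

	/*
	 * Conceptual fault handling for the quick last-instruction fetch:
	 * if the shadow vcpu is in "skip" mode, swallow the fault and resume
	 * at the next instruction, so the faulting lwz leaves its destination
	 * register (r0 == KVM_INST_FETCH_FAILED) untouched.
	 */
	static int skip_faulting_load(uint8_t in_guest, uint64_t *srr0)
	{
		if (in_guest != KVM_GUEST_MODE_SKIP)
			return 0;	/* not our trick: handle the fault normally */

		*srr0 += 4;		/* step over the faulting load */
		return 1;		/* fault swallowed */
	}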