diff options
author | Alexander Graf <agraf@suse.de> | 2010-04-15 18:11:32 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-05-17 05:18:14 -0400 |
commit | 2191d657c9eaa4c444c33e014199ed9de1ac339d (patch) | |
tree | 091ac64b7ed2962ad9482660a58efa3532d4dcb7 /arch/powerpc/kvm/book3s_interrupts.S | |
parent | 77a1a715707d0f60ce0cfbe44070527a0a561f01 (diff) |
KVM: PPC: Name generic 64-bit code generic
We have quite some code that can be used by Book3S_32 and Book3S_64 alike,
so let's call it "Book3S" instead of "Book3S_64", so we can later on
use it from the 32 bit port too.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/book3s_interrupts.S')
-rw-r--r-- | arch/powerpc/kvm/book3s_interrupts.S | 317 |
1 file changed, 317 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S new file mode 100644 index 000000000000..570f87407691 --- /dev/null +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/kvm_asm.h> | ||
22 | #include <asm/reg.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | #include <asm/exception-64s.h> | ||
26 | |||
/*
 * Local constants:
 *  - KVMPPC_HANDLE_EXIT: the C exit handler this code branches to; the
 *    leading dot is the ppc64 ELFv1 function-entry ("dot") symbol —
 *    presumably required here because we `bl` to it directly; confirm
 *    against the ABI in use.
 *  - ULONG_SIZE: sizeof(unsigned long) on ppc64.
 *  - VCPU_GPR(n): byte offset of guest GPR n inside the vcpu structure,
 *    relative to the VCPU_GPRS offset generated by asm-offsets.
 */
27 | #define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit | ||
28 | #define ULONG_SIZE 8 | ||
29 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) | ||
30 | |||
/*
 * DISABLE_INTERRUPTS: clear MSR[EE] (external-interrupt enable) while
 * leaving every other MSR bit untouched.  Clobbers r0.
 */
31 | .macro DISABLE_INTERRUPTS | ||
32 | mfmsr r0 | ||
/* Rotate left by 48 so the EE bit lands in the most-significant bit,
 * where rldicl's mask (mb=1) clears it; the extra rotate-left of 16
 * (48 + 16 = 64) restores the original bit alignment with EE now 0. */
33 | rldicl r0,r0,48,1 | ||
34 | rotldi r0,r0,16 | ||
/* mtmsrd with L=1 updates only the EE/RI bits — a cheap partial write */
35 | mtmsrd r0,1 | ||
36 | .endm | ||
37 | |||
/*
 * VCPU_LOAD_NVGPRS(vcpu): load the guest's non-volatile GPRs (r14-r31)
 * from the vcpu structure whose pointer is in register `vcpu`.  Only the
 * non-volatile range is loaded here; the volatile registers are not
 * touched by this macro.
 */
38 | #define VCPU_LOAD_NVGPRS(vcpu) \ | ||
39 | ld r14, VCPU_GPR(r14)(vcpu); \ | ||
40 | ld r15, VCPU_GPR(r15)(vcpu); \ | ||
41 | ld r16, VCPU_GPR(r16)(vcpu); \ | ||
42 | ld r17, VCPU_GPR(r17)(vcpu); \ | ||
43 | ld r18, VCPU_GPR(r18)(vcpu); \ | ||
44 | ld r19, VCPU_GPR(r19)(vcpu); \ | ||
45 | ld r20, VCPU_GPR(r20)(vcpu); \ | ||
46 | ld r21, VCPU_GPR(r21)(vcpu); \ | ||
47 | ld r22, VCPU_GPR(r22)(vcpu); \ | ||
48 | ld r23, VCPU_GPR(r23)(vcpu); \ | ||
49 | ld r24, VCPU_GPR(r24)(vcpu); \ | ||
50 | ld r25, VCPU_GPR(r25)(vcpu); \ | ||
51 | ld r26, VCPU_GPR(r26)(vcpu); \ | ||
52 | ld r27, VCPU_GPR(r27)(vcpu); \ | ||
53 | ld r28, VCPU_GPR(r28)(vcpu); \ | ||
54 | ld r29, VCPU_GPR(r29)(vcpu); \ | ||
55 | ld r30, VCPU_GPR(r30)(vcpu); \ | ||
56 | ld r31, VCPU_GPR(r31)(vcpu); \ | ||
57 | |||
58 | /***************************************************************************** | ||
59 | * * | ||
60 | * Guest entry / exit code that is in kernel module memory (highmem) * | ||
61 | * * | ||
62 | ****************************************************************************/ | ||
63 | |||
64 | /* Registers: | ||
65 | * r3: kvm_run pointer | ||
66 | * r4: vcpu pointer | ||
67 | */ | ||
/*
 * __kvmppc_vcpu_entry: enter the guest from host kernel (highmem) code.
 * In:  r3 = kvm_run pointer, r4 = vcpu pointer (saved to the stack so
 *      the exit path can recover them with REST_2GPRS).
 * Saves full host state (LR, r14-r31, r3/r4) on a SWITCH_FRAME_SIZE
 * stack frame, stashes r1/r2 in the PACA for the exit path, loads guest
 * non-volatile state, then branches via CTR to the real-mode entry
 * trampoline with translation (IR/DR) disabled in the target MSR.
 */
68 | _GLOBAL(__kvmppc_vcpu_entry) | ||
69 | |||
70 | kvm_start_entry: | ||
71 | /* Write correct stack frame */ | ||
72 | mflr r0 | ||
73 | std r0,16(r1) | ||
74 | |||
75 | /* Save host state to the stack */ | ||
76 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
77 | |||
78 | /* Save r3 (kvm_run) and r4 (vcpu) */ | ||
79 | SAVE_2GPRS(3, r1) | ||
80 | |||
81 | /* Save non-volatile registers (r14 - r31) */ | ||
82 | SAVE_NVGPRS(r1) | ||
83 | |||
84 | /* Save LR */ | ||
85 | std r0, _LINK(r1) | ||
86 | |||
87 | /* Load non-volatile guest state from the vcpu */ | ||
88 | VCPU_LOAD_NVGPRS(r4) | ||
89 | |||
90 | /* Save R1/R2 in the PACA */ | ||
91 | std r1, PACA_KVM_HOST_R1(r13) | ||
92 | std r2, PACA_KVM_HOST_R2(r13) | ||
93 | |||
94 | /* XXX swap in/out on load? */ | ||
95 | ld r3, VCPU_HIGHMEM_HANDLER(r4) | ||
96 | std r3, PACA_KVM_VMHANDLER(r13) | ||
97 | |||
/*
 * kvm_start_lightweight: re-entry point for guest resume when the host
 * state saved above is still valid (see kvm_loop_lightweight /
 * kvm_loop_heavyweight below) — only guest state is (re)loaded here.
 */
98 | kvm_start_lightweight: | ||
99 | |||
/* r9/r10 are not consumed before the bctr below — presumably picked up
 * by the real-mode trampoline we branch to; confirm against the
 * rmhandlers code. */
100 | ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ | ||
101 | ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ | ||
102 | |||
103 | /* Load some guest state in the respective registers */ | ||
104 | ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */ | ||
105 | /* will be swapped in by rmcall */ | ||
106 | |||
107 | ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ | ||
108 | mtlr r3 /* LR = r3 */ | ||
109 | |||
110 | DISABLE_INTERRUPTS | ||
111 | |||
112 | /* Some guests may need to have dcbz set to 32 byte length. | ||
113 | * | ||
114 | * Usually we ensure that by patching the guest's instructions | ||
115 | * to trap on dcbz and emulate it in the hypervisor. | ||
116 | * | ||
117 | * If we can, we should tell the CPU to use 32 byte dcbz though, | ||
118 | * because that's a lot faster. | ||
119 | */ | ||
120 | |||
121 | ld r3, VCPU_HFLAGS(r4) | ||
122 | rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */ | ||
123 | beq no_dcbz32_on | ||
124 | |||
125 | mfspr r3,SPRN_HID5 | ||
126 | ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */ | ||
127 | mtspr SPRN_HID5,r3 | ||
128 | |||
129 | no_dcbz32_on: | ||
130 | |||
131 | ld r6, VCPU_RMCALL(r4) | ||
132 | mtctr r6 | ||
133 | |||
/* r3 = real-mode entry trampoline, r4 = MSR with translation off;
 * the rmcall stub in CTR uses these to rfi into the guest entry path */
134 | ld r3, VCPU_TRAMPOLINE_ENTER(r4) | ||
135 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | ||
136 | |||
137 | /* Jump to SLB patching handler and into our guest */ | ||
138 | bctr | ||
139 | |||
140 | /* | ||
141 | * This is the handler in module memory. It gets jumped at from the | ||
142 | * lowmem trampoline code, so it's basically the guest exit code. | ||
143 | * | ||
144 | */ | ||
145 | |||
/*
 * kvmppc_handler_highmem: guest exit path.  Reached (via jump from the
 * lowmem trampoline) after a guest interrupt; saves guest state into the
 * vcpu, undoes the dcbz32 HID5 tweak if it was enabled on entry, and
 * either re-enters the host kernel directly or bounces through the
 * lowmem trampoline so Linux can service external/decrementer interrupts.
 */
146 | .global kvmppc_handler_highmem | ||
147 | kvmppc_handler_highmem: | ||
148 | |||
149 | /* | ||
150 | * Register usage at this point: | ||
151 | * | ||
152 | * R0 = guest last inst | ||
153 | * R1 = host R1 | ||
154 | * R2 = host R2 | ||
155 | * R3 = guest PC | ||
156 | * R4 = guest MSR | ||
157 | * R5 = guest DAR | ||
158 | * R6 = guest DSISR | ||
159 | * R13 = PACA | ||
160 | * PACA.KVM.* = guest * | ||
161 | * | ||
162 | */ | ||
163 | |||
/* r4 was saved at entry by SAVE_2GPRS(3, r1) — reload the vcpu pointer */
164 | /* R7 = vcpu */ | ||
165 | ld r7, GPR4(r1) | ||
166 | |||
167 | /* Now save the guest state */ | ||
168 | |||
169 | stw r0, VCPU_LAST_INST(r7) | ||
170 | |||
171 | std r3, VCPU_PC(r7) | ||
172 | std r4, VCPU_SHADOW_SRR1(r7) | ||
173 | std r5, VCPU_FAULT_DEAR(r7) | ||
174 | stw r6, VCPU_FAULT_DSISR(r7) | ||
175 | |||
/* If the dcbz32 hflag was set on entry, clear HID5_dcbz32 again */
176 | ld r5, VCPU_HFLAGS(r7) | ||
177 | rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ | ||
178 | beq no_dcbz32_off | ||
179 | |||
180 | li r4, 0 | ||
/* Insert the zero from r4 over the dcbz32 bit in HID5 */
181 | mfspr r5,SPRN_HID5 | ||
182 | rldimi r5,r4,6,56 | ||
183 | mtspr SPRN_HID5,r5 | ||
184 | |||
185 | no_dcbz32_off: | ||
186 | |||
187 | std r14, VCPU_GPR(r14)(r7) | ||
188 | std r15, VCPU_GPR(r15)(r7) | ||
189 | std r16, VCPU_GPR(r16)(r7) | ||
190 | std r17, VCPU_GPR(r17)(r7) | ||
191 | std r18, VCPU_GPR(r18)(r7) | ||
192 | std r19, VCPU_GPR(r19)(r7) | ||
193 | std r20, VCPU_GPR(r20)(r7) | ||
194 | std r21, VCPU_GPR(r21)(r7) | ||
195 | std r22, VCPU_GPR(r22)(r7) | ||
196 | std r23, VCPU_GPR(r23)(r7) | ||
197 | std r24, VCPU_GPR(r24)(r7) | ||
198 | std r25, VCPU_GPR(r25)(r7) | ||
199 | std r26, VCPU_GPR(r26)(r7) | ||
200 | std r27, VCPU_GPR(r27)(r7) | ||
201 | std r28, VCPU_GPR(r28)(r7) | ||
202 | std r29, VCPU_GPR(r29)(r7) | ||
203 | std r30, VCPU_GPR(r30)(r7) | ||
204 | std r31, VCPU_GPR(r31)(r7) | ||
205 | |||
206 | /* Save guest CTR */ | ||
207 | mfctr r5 | ||
208 | std r5, VCPU_CTR(r7) | ||
209 | |||
210 | /* Save guest LR */ | ||
211 | mflr r5 | ||
212 | std r5, VCPU_LR(r7) | ||
213 | |||
/* NOTE(review): r6 is consumed by mtmsr below on the direct-return path;
 * the "-> SRR1" wording looks stale for that path — verify against the
 * trampoline the call_linux_handler path rfi's to. */
214 | /* Restore host msr -> SRR1 */ | ||
215 | ld r6, VCPU_HOST_MSR(r7) | ||
216 | |||
217 | /* | ||
218 | * For some interrupts, we need to call the real Linux | ||
219 | * handler, so it can do work for us. This has to happen | ||
220 | * as if the interrupt arrived from the kernel though, | ||
221 | * so let's fake it here where most state is restored. | ||
222 | * | ||
223 | * Call Linux for hardware interrupts/decrementer | ||
224 | * r3 = address of interrupt handler (exit reason) | ||
225 | */ | ||
226 | |||
/* r12 holds the exit reason (BOOK3S_INTERRUPT_*) at this point */
227 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | ||
228 | beq call_linux_handler | ||
229 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER | ||
230 | beq call_linux_handler | ||
231 | |||
/* No Linux handler needed: restore host MSR (re-enabling EE) and go
 * straight to the common return point */
232 | /* Back to EE=1 */ | ||
233 | mtmsr r6 | ||
234 | b kvm_return_point | ||
235 | |||
236 | call_linux_handler: | ||
237 | |||
238 | /* | ||
239 | * If we land here we need to jump back to the handler we | ||
240 | * came from. | ||
241 | * | ||
242 | * We have a page that we can access from real mode, so let's | ||
243 | * jump back to that and use it as a trampoline to get back into the | ||
244 | * interrupt handler! | ||
245 | * | ||
246 | * R3 still contains the exit code, | ||
247 | * R5 VCPU_HOST_RETIP and | ||
248 | * R6 VCPU_HOST_MSR | ||
249 | */ | ||
/* NOTE(review): the exit-reason comparisons above used r12, and r12 is
 * what gets passed onward via mtlr below; "R3" in the comment above may
 * be stale — confirm against the lowmem trampoline's expectations. */
250 | |||
251 | /* Restore host IP -> SRR0 */ | ||
252 | ld r5, VCPU_HOST_RETIP(r7) | ||
253 | |||
254 | /* XXX Better move to a safe function? | ||
255 | * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ | ||
256 | |||
/* Hand the exit reason (r12) to the trampoline in LR */
257 | mtlr r12 | ||
258 | |||
/* rfi to the lowmem trampoline with translation (IR/DR) disabled */
259 | ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) | ||
260 | mtsrr0 r4 | ||
261 | LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | ||
262 | mtsrr1 r3 | ||
263 | |||
264 | RFI | ||
265 | |||
/*
 * kvm_return_point: common post-exit path.  Calls the C exit handler
 * kvmppc_handle_exit(run=r3, vcpu=r4, exit_nr=r5) and dispatches on its
 * return value: RESUME_GUEST re-enters the guest keeping non-volatile
 * guest state (lightweight), RESUME_GUEST_NV re-enters after reloading
 * non-volatiles (heavyweight), anything else unwinds to the host caller.
 */
266 | .global kvm_return_point | ||
267 | kvm_return_point: | ||
268 | |||
269 | /* Jump back to lightweight entry if we're supposed to */ | ||
270 | /* go back into the guest */ | ||
271 | |||
272 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | ||
273 | mr r5, r12 | ||
274 | |||
275 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | ||
276 | REST_2GPRS(3, r1) | ||
277 | bl KVMPPC_HANDLE_EXIT | ||
278 | |||
279 | /* If RESUME_GUEST, get back in the loop */ | ||
280 | cmpwi r3, RESUME_GUEST | ||
281 | beq kvm_loop_lightweight | ||
282 | |||
283 | cmpwi r3, RESUME_GUEST_NV | ||
284 | beq kvm_loop_heavyweight | ||
285 | |||
/* Not resuming: tear down the frame built in kvm_start_entry and return
 * to our caller with the exit handler's result still in r3 */
286 | kvm_exit_loop: | ||
287 | |||
288 | ld r4, _LINK(r1) | ||
289 | mtlr r4 | ||
290 | |||
291 | /* Restore non-volatile host registers (r14 - r31) */ | ||
292 | REST_NVGPRS(r1) | ||
293 | |||
294 | addi r1, r1, SWITCH_FRAME_SIZE | ||
295 | blr | ||
296 | |||
/*
 * kvm_loop_heavyweight: RESUME_GUEST_NV path.  The guest's non-volatile
 * registers are no longer live, so reload them from the vcpu before
 * re-entering at kvm_start_lightweight.
 */
297 | kvm_loop_heavyweight: | ||
298 | |||
/* Re-save the host LR into the caller's LR save slot (16 bytes above
 * our SWITCH_FRAME_SIZE frame) so the eventual exit path finds it */
299 | ld r4, _LINK(r1) | ||
300 | std r4, (16 + SWITCH_FRAME_SIZE)(r1) | ||
301 | |||
302 | /* Load vcpu and cpu_run */ | ||
303 | REST_2GPRS(3, r1) | ||
304 | |||
305 | /* Load non-volatile guest state from the vcpu */ | ||
306 | VCPU_LOAD_NVGPRS(r4) | ||
307 | |||
308 | /* Jump back into the beginning of this function */ | ||
309 | b kvm_start_lightweight | ||
310 | |||
/*
 * kvm_loop_lightweight: RESUME_GUEST path.  Guest non-volatile registers
 * are still intact (unlike the heavyweight path above), so only the vcpu
 * pointer (r4) needs restoring before re-entry.
 */
311 | kvm_loop_lightweight: | ||
312 | |||
313 | /* We'll need the vcpu pointer */ | ||
314 | REST_GPR(4, r1) | ||
315 | |||
316 | /* Jump back into the beginning of this function */ | ||
317 | b kvm_start_lightweight | ||