author    Alexander Graf <agraf@suse.de>    2010-04-15 18:11:35 -0400
committer Avi Kivity <avi@redhat.com>       2010-05-17 05:18:18 -0400
commit    0737279427bef48f552b3ab63a6c0ba7491fe29f (patch)
tree      c7f5e50f36317176175b581125262405c157f570 /arch/powerpc
parent    786f19daa8b109ae6b96a351eee3a14b9f8b57d0 (diff)
KVM: PPC: Add generic segment switching code
This is the code that will later be used instead of book3s_64_slb.S. It does the last step of guest entry and the first generic steps of guest exiting, once we have determined the interrupt is a KVM interrupt. It also reads the last used instruction from the guest virtual address space if necessary, to speed up that path.

The new thing about this file is that it makes use of generic long load and store functions and calls a macro to fill in the actual segment switching code. That still needs to be done differently for book3s_32 and book3s_64.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
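For context (not part of the patch itself): the "generic long load and store functions" are the PPC_LL and PPC_STL assembler macros the new file uses for every register-width memory access. Simplified, they expand roughly as sketched below (the real definitions live in the powerpc asm-compat header; this is only an illustration):

    #ifdef __powerpc64__
    #define PPC_LL   ld      /* 64-bit doubleword load  */
    #define PPC_STL  std     /* 64-bit doubleword store */
    #else
    #define PPC_LL   lwz     /* 32-bit word load  */
    #define PPC_STL  stw     /* 32-bit word store */
    #endif

That way the same assembly source moves full-width GPRs on both Book3S_32 and Book3S_64.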
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 257
1 file changed, 257 insertions, 0 deletions
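Likewise, the "macro to fill in the actual segment switching code" refers to the LOAD_GUEST_SEGMENTS and LOAD_HOST_SEGMENTS macros invoked in the file below; each subarch include (book3s_64_slb.S or book3s_32_sr.S) has to supply them. A purely illustrative skeleton of that contract, with placeholder comments rather than the real implementations:

    /* expected from book3s_32_sr.S or book3s_64_slb.S */
    .macro LOAD_GUEST_SEGMENTS
            /* install guest mappings: segment registers on 32-bit,
             * shadow SLB entries on 64-bit */
    .endm

    .macro LOAD_HOST_SEGMENTS
            /* restore the host mappings again */
    .endm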
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
new file mode 100644
index 000000000000..4c0d1d85d20a
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -0,0 +1,257 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)    \
        addi    reg, r13, PACA_KVM_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)                            \
        tophys(reg, r2);                                \
        lwz     reg, (THREAD + THREAD_KVM_SVCPU)(reg);  \
        tophys(reg, reg)

#endif

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

        /* Required state:
         *
         * MSR = ~IR|DR
         * R13 = PACA
         * R1 = host R1
         * R2 = host R2
         * R10 = guest MSR
         * all other volatile GPRS = free
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         */

        /* r3 = shadow vcpu */
        GET_SHADOW_VCPU(r3)

        /* Move SRR0 and SRR1 into the respective regs */
        PPC_LL  r9, SVCPU_PC(r3)
        mtsrr0  r9
        mtsrr1  r10

        /* Activate guest mode, so faults get handled by KVM */
        li      r11, KVM_GUEST_MODE_GUEST
        stb     r11, SVCPU_IN_GUEST(r3)

        /* Switch to guest segment. This is subarch specific. */
        LOAD_GUEST_SEGMENTS

        /* Enter guest */

        PPC_LL  r4, (SVCPU_CTR)(r3)
        PPC_LL  r5, (SVCPU_LR)(r3)
        lwz     r6, (SVCPU_CR)(r3)
        lwz     r7, (SVCPU_XER)(r3)

        mtctr   r4
        mtlr    r5
        mtcr    r6
        mtxer   r7

        PPC_LL  r0, (SVCPU_R0)(r3)
        PPC_LL  r1, (SVCPU_R1)(r3)
        PPC_LL  r2, (SVCPU_R2)(r3)
        PPC_LL  r4, (SVCPU_R4)(r3)
        PPC_LL  r5, (SVCPU_R5)(r3)
        PPC_LL  r6, (SVCPU_R6)(r3)
        PPC_LL  r7, (SVCPU_R7)(r3)
        PPC_LL  r8, (SVCPU_R8)(r3)
        PPC_LL  r9, (SVCPU_R9)(r3)
        PPC_LL  r10, (SVCPU_R10)(r3)
        PPC_LL  r11, (SVCPU_R11)(r3)
        PPC_LL  r12, (SVCPU_R12)(r3)
        PPC_LL  r13, (SVCPU_R13)(r3)

        PPC_LL  r3, (SVCPU_R3)(r3)

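        /* RFI (rfi/rfid) reloads the MSR from SRR1 and resumes at SRR0,
         * i.e. it drops into the guest at SVCPU_PC with the guest MSR
         * that was set up above. */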
        RFI
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

        /* Register usage at this point:
         *
         * SPRG_SCRATCH0  = guest R13
         * R12            = exit handler id
         * R13            = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
         * SVCPU.SCRATCH0 = guest R12
         * SVCPU.SCRATCH1 = guest CR
         *
         */

        /* Save registers */

        PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
        PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
        PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
        PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
        PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
        PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
        PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
        PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
        PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
        PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
        PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
        PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)

        /* Restore R1/R2 so we can handle faults */
        PPC_LL  r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
        PPC_LL  r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)

        /* Save guest PC and MSR */
        mfsrr0  r3
        mfsrr1  r4

        PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
        PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)

        /* Get scratch'ed off registers */
        mfspr   r9, SPRN_SPRG_SCRATCH0
        PPC_LL  r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
        lwz     r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)

        PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
        PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
        stw     r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)

        /* Save more register state */

        mfxer   r5
        mfdar   r6
        mfdsisr r7
        mfctr   r8
        mflr    r9

        stw     r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
        PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
        stw     r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
        PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
        PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)

        /*
         * In order for us to easily get the last instruction we got the
         * #vmexit at, we exploit the fact that the virtual layout is
         * still the same here, so we can just ld from the guest's PC
         * address.
         */

        /* We only load the last instruction when it's safe */
        cmpwi   r12, BOOK3S_INTERRUPT_DATA_STORAGE
        beq     ld_last_inst
        cmpwi   r12, BOOK3S_INTERRUPT_PROGRAM
        beq     ld_last_inst

        b       no_ld_last_inst

ld_last_inst:
        /* Save off the guest instruction we're at */

        /* In case lwz faults */
        li      r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

        /* Set guest mode to 'jump over instruction' so if lwz faults
         * we'll just continue at the next IP. */
        li      r9, KVM_GUEST_MODE_SKIP
        stb     r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)

        /* 1) enable paging for data */
        mfmsr   r9
        ori     r11, r9, MSR_DR                 /* Enable paging for data */
        mtmsr   r11
        sync
        /* 2) fetch the instruction */
        lwz     r0, 0(r3)
        /* 3) disable paging again */
        mtmsr   r9
        sync

#endif
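        /* r0 is now either the fetched guest instruction or still
         * KVM_INST_FETCH_FAILED (quick path disabled or lwz faulted),
         * in which case the instruction has to be fetched again later
         * if it is actually needed. */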
        stw     r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)

no_ld_last_inst:

        /* Unset guest mode */
        li      r9, KVM_GUEST_MODE_NONE
        stb     r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)

        /* Switch back to host MMU */
        LOAD_HOST_SEGMENTS

        /* Register usage at this point:
         *
         * R1       = host R1
         * R2       = host R2
         * R12      = exit handler id
         * R13      = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
         * SVCPU.*  = guest *
         *
         */

        /* RFI into the highmem handler */
        mfmsr   r7
        ori     r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME     /* Enable paging */
        mtsrr1  r7
        /* Load highmem handler address */
        PPC_LL  r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
        mtsrr0  r8

        RFI
kvmppc_handler_trampoline_exit_end: