author     Alexander Graf <agraf@suse.de>    2010-04-15 18:11:47 -0400
committer  Avi Kivity <avi@redhat.com>       2010-05-17 05:18:37 -0400
commit     b79fcdf67e9e03773fb032679675d8008d5cc2dc
tree       3dcc82312d4ef059b33496486a29d630d1e0d87e  /arch/powerpc
parent     8c3a4e0b673ba8b274399f575dc803a89a953a66
KVM: PPC: Make highmem code generic
Since we now have several fields in the shadow VCPU, we also change
the internal calling convention between the different entry/exit code
layers.
Let's reflect that in the IR=1 code and make sure we use "long" defines
for long field access.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 201
1 file changed, 101 insertions(+), 100 deletions(-)
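
Note: PPC_LL, PPC_STL, PPC_STLU and PPC_LR_STKOFF used throughout the diff below are the kernel's size-agnostic ("long") load/store helpers. The sketch below paraphrases how they are assumed to resolve on the two Book3S targets (per the powerpc asm headers of that era, asm-compat.h and friends, simplified rather than verbatim):

/* Assumed expansion of the "long" access helpers (simplified sketch) */
#ifdef __powerpc64__                     /* 64-bit Book3S */
#define PPC_LL          ld               /* load  "long" = 8 bytes          */
#define PPC_STL         std              /* store "long"                    */
#define PPC_STLU        stdu             /* store "long" with update        */
#define PPC_LR_STKOFF   16               /* LR save slot in the stack frame */
#else                                    /* 32-bit Book3S */
#define PPC_LL          lwz              /* load  "long" = 4 bytes          */
#define PPC_STL         stw              /* store "long"                    */
#define PPC_STLU        stwu             /* store "long" with update        */
#define PPC_LR_STKOFF   4                /* LR save slot in the stack frame */
#endif

FUNC(name) is defined locally by this patch: on 64-bit it expands to GLUE(.,name) so that a call such as "bl FUNC(kvmppc_handle_exit)" targets the dot-symbol text entry of the function descriptor, while on 32-bit the plain symbol name is used.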
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 570f87407691..a1b50280dc47 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -24,36 +24,56 @@
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
 
-#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#if defined(CONFIG_PPC_BOOK3S_64)
 
-.macro DISABLE_INTERRUPTS
-       mfmsr   r0
-       rldicl  r0,r0,48,1
-       rotldi  r0,r0,16
-       mtmsrd  r0,1
-.endm
+#define ULONG_SIZE 8
+#define FUNC(name) GLUE(.,name)
 
+#define GET_SHADOW_VCPU(reg) \
+       addi    reg, r13, PACA_KVM_SVCPU
+
+#define DISABLE_INTERRUPTS \
+       mfmsr   r0;             \
+       rldicl  r0,r0,48,1;     \
+       rotldi  r0,r0,16;       \
+       mtmsrd  r0,1;           \
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define ULONG_SIZE 4
+#define FUNC(name) name
+
+#define GET_SHADOW_VCPU(reg) \
+       lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+
+#define DISABLE_INTERRUPTS \
+       mfmsr   r0;             \
+       rlwinm  r0,r0,0,17,15;  \
+       mtmsr   r0;             \
+
+#endif /* CONFIG_PPC_BOOK3S_XX */
+
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
 #define VCPU_LOAD_NVGPRS(vcpu) \
-       ld      r14, VCPU_GPR(r14)(vcpu); \
-       ld      r15, VCPU_GPR(r15)(vcpu); \
-       ld      r16, VCPU_GPR(r16)(vcpu); \
-       ld      r17, VCPU_GPR(r17)(vcpu); \
-       ld      r18, VCPU_GPR(r18)(vcpu); \
-       ld      r19, VCPU_GPR(r19)(vcpu); \
-       ld      r20, VCPU_GPR(r20)(vcpu); \
-       ld      r21, VCPU_GPR(r21)(vcpu); \
-       ld      r22, VCPU_GPR(r22)(vcpu); \
-       ld      r23, VCPU_GPR(r23)(vcpu); \
-       ld      r24, VCPU_GPR(r24)(vcpu); \
-       ld      r25, VCPU_GPR(r25)(vcpu); \
-       ld      r26, VCPU_GPR(r26)(vcpu); \
-       ld      r27, VCPU_GPR(r27)(vcpu); \
-       ld      r28, VCPU_GPR(r28)(vcpu); \
-       ld      r29, VCPU_GPR(r29)(vcpu); \
-       ld      r30, VCPU_GPR(r30)(vcpu); \
-       ld      r31, VCPU_GPR(r31)(vcpu); \
+       PPC_LL  r14, VCPU_GPR(r14)(vcpu); \
+       PPC_LL  r15, VCPU_GPR(r15)(vcpu); \
+       PPC_LL  r16, VCPU_GPR(r16)(vcpu); \
+       PPC_LL  r17, VCPU_GPR(r17)(vcpu); \
+       PPC_LL  r18, VCPU_GPR(r18)(vcpu); \
+       PPC_LL  r19, VCPU_GPR(r19)(vcpu); \
+       PPC_LL  r20, VCPU_GPR(r20)(vcpu); \
+       PPC_LL  r21, VCPU_GPR(r21)(vcpu); \
+       PPC_LL  r22, VCPU_GPR(r22)(vcpu); \
+       PPC_LL  r23, VCPU_GPR(r23)(vcpu); \
+       PPC_LL  r24, VCPU_GPR(r24)(vcpu); \
+       PPC_LL  r25, VCPU_GPR(r25)(vcpu); \
+       PPC_LL  r26, VCPU_GPR(r26)(vcpu); \
+       PPC_LL  r27, VCPU_GPR(r27)(vcpu); \
+       PPC_LL  r28, VCPU_GPR(r28)(vcpu); \
+       PPC_LL  r29, VCPU_GPR(r29)(vcpu); \
+       PPC_LL  r30, VCPU_GPR(r30)(vcpu); \
+       PPC_LL  r31, VCPU_GPR(r31)(vcpu); \
 
 /*****************************************************************************
  *                                                                           *
@@ -69,11 +89,11 @@ _GLOBAL(__kvmppc_vcpu_entry)
 
 kvm_start_entry:
        /* Write correct stack frame */
        mflr    r0
-       std     r0,16(r1)
+       PPC_STL r0,PPC_LR_STKOFF(r1)
 
        /* Save host state to the stack */
-       stdu    r1, -SWITCH_FRAME_SIZE(r1)
+       PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
        /* Save r3 (kvm_run) and r4 (vcpu) */
        SAVE_2GPRS(3, r1)
@@ -82,33 +102,28 @@ kvm_start_entry:
        SAVE_NVGPRS(r1)
 
        /* Save LR */
-       std     r0, _LINK(r1)
+       PPC_STL r0, _LINK(r1)
 
        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)
 
+       GET_SHADOW_VCPU(r5)
+
        /* Save R1/R2 in the PACA */
-       std     r1, PACA_KVM_HOST_R1(r13)
-       std     r2, PACA_KVM_HOST_R2(r13)
+       PPC_STL r1, SVCPU_HOST_R1(r5)
+       PPC_STL r2, SVCPU_HOST_R2(r5)
 
        /* XXX swap in/out on load? */
-       ld      r3, VCPU_HIGHMEM_HANDLER(r4)
-       std     r3, PACA_KVM_VMHANDLER(r13)
+       PPC_LL  r3, VCPU_HIGHMEM_HANDLER(r4)
+       PPC_STL r3, SVCPU_VMHANDLER(r5)
 
 kvm_start_lightweight:
 
-       ld      r9, VCPU_PC(r4)                 /* r9 = vcpu->arch.pc */
-       ld      r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */
-
-       /* Load some guest state in the respective registers */
-       ld      r5, VCPU_CTR(r4)                /* r5 = vcpu->arch.ctr */
-                                               /* will be swapped in by rmcall */
-
-       ld      r3, VCPU_LR(r4)                 /* r3 = vcpu->arch.lr */
-       mtlr    r3                              /* LR = r3 */
+       PPC_LL  r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */
 
        DISABLE_INTERRUPTS
 
+#ifdef CONFIG_PPC_BOOK3S_64
        /* Some guests may need to have dcbz set to 32 byte length.
         *
         * Usually we ensure that by patching the guest's instructions
@@ -118,7 +133,7 @@ kvm_start_lightweight:
         * because that's a lot faster.
         */
 
-       ld      r3, VCPU_HFLAGS(r4)
+       PPC_LL  r3, VCPU_HFLAGS(r4)
        rldicl. r3, r3, 0, 63           /* CR = ((r3 & 1) == 0) */
        beq     no_dcbz32_on
 
@@ -128,13 +143,15 @@ kvm_start_lightweight:
 
 no_dcbz32_on:
 
-       ld      r6, VCPU_RMCALL(r4)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+       PPC_LL  r6, VCPU_RMCALL(r4)
        mtctr   r6
 
-       ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
+       PPC_LL  r3, VCPU_TRAMPOLINE_ENTER(r4)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
-       /* Jump to SLB patching handlder and into our guest */
+       /* Jump to segment patching handler and into our guest */
        bctr
 
 /*
@@ -149,31 +166,20 @@ kvmppc_handler_highmem:
        /*
         * Register usage at this point:
         *
-        * R0         = guest last inst
-        * R1         = host R1
-        * R2         = host R2
-        * R3         = guest PC
-        * R4         = guest MSR
-        * R5         = guest DAR
-        * R6         = guest DSISR
-        * R13        = PACA
-        * PACA.KVM.* = guest *
+        * R1       = host R1
+        * R2       = host R2
+        * R12      = exit handler id
+        * R13      = PACA
+        * SVCPU.*  = guest *
         *
         */
 
        /* R7 = vcpu */
-       ld      r7, GPR4(r1)
-
-       /* Now save the guest state */
-
-       stw     r0, VCPU_LAST_INST(r7)
+       PPC_LL  r7, GPR4(r1)
 
-       std     r3, VCPU_PC(r7)
-       std     r4, VCPU_SHADOW_SRR1(r7)
-       std     r5, VCPU_FAULT_DEAR(r7)
-       stw     r6, VCPU_FAULT_DSISR(r7)
+#ifdef CONFIG_PPC_BOOK3S_64
 
-       ld      r5, VCPU_HFLAGS(r7)
+       PPC_LL  r5, VCPU_HFLAGS(r7)
        rldicl. r5, r5, 0, 63           /* CR = ((r5 & 1) == 0) */
        beq     no_dcbz32_off
 
@@ -184,35 +190,29 @@ kvmppc_handler_highmem:
 
 no_dcbz32_off:
 
-       std     r14, VCPU_GPR(r14)(r7)
-       std     r15, VCPU_GPR(r15)(r7)
-       std     r16, VCPU_GPR(r16)(r7)
-       std     r17, VCPU_GPR(r17)(r7)
-       std     r18, VCPU_GPR(r18)(r7)
-       std     r19, VCPU_GPR(r19)(r7)
-       std     r20, VCPU_GPR(r20)(r7)
-       std     r21, VCPU_GPR(r21)(r7)
-       std     r22, VCPU_GPR(r22)(r7)
-       std     r23, VCPU_GPR(r23)(r7)
-       std     r24, VCPU_GPR(r24)(r7)
-       std     r25, VCPU_GPR(r25)(r7)
-       std     r26, VCPU_GPR(r26)(r7)
-       std     r27, VCPU_GPR(r27)(r7)
-       std     r28, VCPU_GPR(r28)(r7)
-       std     r29, VCPU_GPR(r29)(r7)
-       std     r30, VCPU_GPR(r30)(r7)
-       std     r31, VCPU_GPR(r31)(r7)
-
-       /* Save guest CTR */
-       mfctr   r5
-       std     r5, VCPU_CTR(r7)
-
-       /* Save guest LR */
-       mflr    r5
-       std     r5, VCPU_LR(r7)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+       PPC_STL r14, VCPU_GPR(r14)(r7)
+       PPC_STL r15, VCPU_GPR(r15)(r7)
+       PPC_STL r16, VCPU_GPR(r16)(r7)
+       PPC_STL r17, VCPU_GPR(r17)(r7)
+       PPC_STL r18, VCPU_GPR(r18)(r7)
+       PPC_STL r19, VCPU_GPR(r19)(r7)
+       PPC_STL r20, VCPU_GPR(r20)(r7)
+       PPC_STL r21, VCPU_GPR(r21)(r7)
+       PPC_STL r22, VCPU_GPR(r22)(r7)
+       PPC_STL r23, VCPU_GPR(r23)(r7)
+       PPC_STL r24, VCPU_GPR(r24)(r7)
+       PPC_STL r25, VCPU_GPR(r25)(r7)
+       PPC_STL r26, VCPU_GPR(r26)(r7)
+       PPC_STL r27, VCPU_GPR(r27)(r7)
+       PPC_STL r28, VCPU_GPR(r28)(r7)
+       PPC_STL r29, VCPU_GPR(r29)(r7)
+       PPC_STL r30, VCPU_GPR(r30)(r7)
+       PPC_STL r31, VCPU_GPR(r31)(r7)
 
        /* Restore host msr -> SRR1 */
-       ld      r6, VCPU_HOST_MSR(r7)
+       PPC_LL  r6, VCPU_HOST_MSR(r7)
 
        /*
         * For some interrupts, we need to call the real Linux
@@ -231,6 +231,7 @@ no_dcbz32_off:
 
        /* Back to EE=1 */
        mtmsr   r6
+       sync
        b       kvm_return_point
 
 call_linux_handler:
@@ -249,14 +250,14 @@ call_linux_handler:
         */
 
        /* Restore host IP -> SRR0 */
-       ld      r5, VCPU_HOST_RETIP(r7)
+       PPC_LL  r5, VCPU_HOST_RETIP(r7)
 
        /* XXX Better move to a safe function?
        *      What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
 
        mtlr    r12
 
-       ld      r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+       PPC_LL  r4, VCPU_TRAMPOLINE_LOWMEM(r7)
        mtsrr0  r4
        LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
        mtsrr1  r3
@@ -274,7 +275,7 @@ kvm_return_point:
 
        /* Restore r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)
-       bl      KVMPPC_HANDLE_EXIT
+       bl      FUNC(kvmppc_handle_exit)
 
        /* If RESUME_GUEST, get back in the loop */
        cmpwi   r3, RESUME_GUEST
@@ -285,7 +286,7 @@ kvm_return_point:
 
 kvm_exit_loop:
 
-       ld      r4, _LINK(r1)
+       PPC_LL  r4, _LINK(r1)
        mtlr    r4
 
        /* Restore non-volatile host registers (r14 - r31) */
@@ -296,8 +297,8 @@ kvm_exit_loop:
 
 kvm_loop_heavyweight:
 
-       ld      r4, _LINK(r1)
-       std     r4, (16 + SWITCH_FRAME_SIZE)(r1)
+       PPC_LL  r4, _LINK(r1)
+       PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
 
        /* Load vcpu and cpu_run */
        REST_2GPRS(3, r1)