Diffstat (limited to 'arch/ia64/kvm/vmm_ivt.S')
-rw-r--r-- | arch/ia64/kvm/vmm_ivt.S | 1469
1 file changed, 732 insertions, 737 deletions
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index c1d7251a1480..3ef1a017a318 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1,5 +1,5 @@
1 | /* | 1 | /* |
2 | * /ia64/kvm_ivt.S | 2 | * arch/ia64/kvm/vmm_ivt.S |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co |
5 | * Stephane Eranian <eranian@hpl.hp.com> | 5 | * Stephane Eranian <eranian@hpl.hp.com> |
@@ -70,32 +70,39 @@
70 | # define PSR_DEFAULT_BITS 0 | 70 | # define PSR_DEFAULT_BITS 0 |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | |||
74 | #define KVM_FAULT(n) \ | 73 | #define KVM_FAULT(n) \ |
75 | kvm_fault_##n:; \ | 74 | kvm_fault_##n:; \ |
76 | mov r19=n;; \ | 75 | mov r19=n;; \ |
77 | br.sptk.many kvm_fault_##n; \ | 76 | br.sptk.many kvm_vmm_panic; \ |
78 | ;; \ | 77 | ;; \ |
79 | |||
80 | 78 | ||
81 | #define KVM_REFLECT(n) \ | 79 | #define KVM_REFLECT(n) \ |
82 | mov r31=pr; \ | 80 | mov r31=pr; \ |
83 | mov r19=n; /* prepare to save predicates */ \ | 81 | mov r19=n; /* prepare to save predicates */ \ |
84 | mov r29=cr.ipsr; \ | 82 | mov r29=cr.ipsr; \ |
85 | ;; \ | 83 | ;; \ |
86 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ | 84 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ |
87 | (p7)br.sptk.many kvm_dispatch_reflection; \ | 85 | (p7) br.sptk.many kvm_dispatch_reflection; \ |
88 | br.sptk.many kvm_panic; \ | 86 | br.sptk.many kvm_vmm_panic; \ |
89 | 87 | ||
90 | 88 | GLOBAL_ENTRY(kvm_vmm_panic) | |
91 | GLOBAL_ENTRY(kvm_panic) | 89 | KVM_SAVE_MIN_WITH_COVER_R19 |
92 | br.sptk.many kvm_panic | 90 | alloc r14=ar.pfs,0,0,1,0 |
93 | ;; | 91 | mov out0=r15 |
94 | END(kvm_panic) | 92 | adds r3=8,r2 // set up second base pointer |
95 | 93 | ;; | |
96 | 94 | ssm psr.ic | |
97 | 95 | ;; | |
98 | 96 | srlz.i // guarantee that interruption collection is on | |
97 | ;; | ||
98 | //(p15) ssm psr.i // restore psr.i | ||
99 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
100 | ;; | ||
101 | KVM_SAVE_REST | ||
102 | mov rp=r14 | ||
103 | ;; | ||
104 | br.call.sptk.many b6=vmm_panic_handler; | ||
105 | END(kvm_vmm_panic) | ||
99 | 106 | ||
100 | .section .text.ivt,"ax" | 107 | .section .text.ivt,"ax" |
101 | 108 | ||
@@ -105,308 +112,307 @@ kvm_ia64_ivt:
105 | /////////////////////////////////////////////////////////////// | 112 | /////////////////////////////////////////////////////////////// |
106 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) | 113 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) |
107 | ENTRY(kvm_vhpt_miss) | 114 | ENTRY(kvm_vhpt_miss) |
108 | KVM_FAULT(0) | 115 | KVM_FAULT(0) |
109 | END(kvm_vhpt_miss) | 116 | END(kvm_vhpt_miss) |
110 | 117 | ||
111 | |||
112 | .org kvm_ia64_ivt+0x400 | 118 | .org kvm_ia64_ivt+0x400 |
113 | //////////////////////////////////////////////////////////////// | 119 | //////////////////////////////////////////////////////////////// |
114 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) | 120 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) |
115 | ENTRY(kvm_itlb_miss) | 121 | ENTRY(kvm_itlb_miss) |
116 | mov r31 = pr | 122 | mov r31 = pr |
117 | mov r29=cr.ipsr; | 123 | mov r29=cr.ipsr; |
118 | ;; | 124 | ;; |
119 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | 125 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; |
120 | (p6) br.sptk kvm_alt_itlb_miss | 126 | (p6) br.sptk kvm_alt_itlb_miss |
121 | mov r19 = 1 | 127 | mov r19 = 1 |
122 | br.sptk kvm_itlb_miss_dispatch | 128 | br.sptk kvm_itlb_miss_dispatch |
123 | KVM_FAULT(1); | 129 | KVM_FAULT(1); |
124 | END(kvm_itlb_miss) | 130 | END(kvm_itlb_miss) |
125 | 131 | ||
126 | .org kvm_ia64_ivt+0x0800 | 132 | .org kvm_ia64_ivt+0x0800 |
127 | ////////////////////////////////////////////////////////////////// | 133 | ////////////////////////////////////////////////////////////////// |
128 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) | 134 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) |
129 | ENTRY(kvm_dtlb_miss) | 135 | ENTRY(kvm_dtlb_miss) |
130 | mov r31 = pr | 136 | mov r31 = pr |
131 | mov r29=cr.ipsr; | 137 | mov r29=cr.ipsr; |
132 | ;; | 138 | ;; |
133 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | 139 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; |
134 | (p6)br.sptk kvm_alt_dtlb_miss | 140 | (p6) br.sptk kvm_alt_dtlb_miss |
135 | br.sptk kvm_dtlb_miss_dispatch | 141 | br.sptk kvm_dtlb_miss_dispatch |
136 | END(kvm_dtlb_miss) | 142 | END(kvm_dtlb_miss) |
137 | 143 | ||
138 | .org kvm_ia64_ivt+0x0c00 | 144 | .org kvm_ia64_ivt+0x0c00 |
139 | //////////////////////////////////////////////////////////////////// | 145 | //////////////////////////////////////////////////////////////////// |
140 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | 146 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) |
141 | ENTRY(kvm_alt_itlb_miss) | 147 | ENTRY(kvm_alt_itlb_miss) |
142 | mov r16=cr.ifa // get address that caused the TLB miss | 148 | mov r16=cr.ifa // get address that caused the TLB miss |
143 | ;; | 149 | ;; |
144 | movl r17=PAGE_KERNEL | 150 | movl r17=PAGE_KERNEL |
145 | mov r24=cr.ipsr | 151 | mov r24=cr.ipsr |
146 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 152 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
147 | ;; | 153 | ;; |
148 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | 154 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits |
149 | ;; | 155 | ;; |
150 | or r19=r17,r19 // insert PTE control bits into r19 | 156 | or r19=r17,r19 // insert PTE control bits into r19 |
151 | ;; | 157 | ;; |
152 | movl r20=IA64_GRANULE_SHIFT<<2 | 158 | movl r20=IA64_GRANULE_SHIFT<<2 |
153 | ;; | 159 | ;; |
154 | mov cr.itir=r20 | 160 | mov cr.itir=r20 |
155 | ;; | 161 | ;; |
156 | itc.i r19 // insert the TLB entry | 162 | itc.i r19 // insert the TLB entry |
157 | mov pr=r31,-1 | 163 | mov pr=r31,-1 |
158 | rfi | 164 | rfi |
159 | END(kvm_alt_itlb_miss) | 165 | END(kvm_alt_itlb_miss) |
160 | 166 | ||
161 | .org kvm_ia64_ivt+0x1000 | 167 | .org kvm_ia64_ivt+0x1000 |
162 | ///////////////////////////////////////////////////////////////////// | 168 | ///////////////////////////////////////////////////////////////////// |
163 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | 169 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) |
164 | ENTRY(kvm_alt_dtlb_miss) | 170 | ENTRY(kvm_alt_dtlb_miss) |
165 | mov r16=cr.ifa // get address that caused the TLB miss | 171 | mov r16=cr.ifa // get address that caused the TLB miss |
166 | ;; | 172 | ;; |
167 | movl r17=PAGE_KERNEL | 173 | movl r17=PAGE_KERNEL |
168 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 174 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
169 | mov r24=cr.ipsr | 175 | mov r24=cr.ipsr |
170 | ;; | 176 | ;; |
171 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | 177 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits |
172 | ;; | 178 | ;; |
173 | or r19=r19,r17 // insert PTE control bits into r19 | 179 | or r19=r19,r17 // insert PTE control bits into r19 |
174 | ;; | 180 | ;; |
175 | movl r20=IA64_GRANULE_SHIFT<<2 | 181 | movl r20=IA64_GRANULE_SHIFT<<2 |
176 | ;; | 182 | ;; |
177 | mov cr.itir=r20 | 183 | mov cr.itir=r20 |
178 | ;; | 184 | ;; |
179 | itc.d r19 // insert the TLB entry | 185 | itc.d r19 // insert the TLB entry |
180 | mov pr=r31,-1 | 186 | mov pr=r31,-1 |
181 | rfi | 187 | rfi |
182 | END(kvm_alt_dtlb_miss) | 188 | END(kvm_alt_dtlb_miss) |
183 | 189 | ||
184 | .org kvm_ia64_ivt+0x1400 | 190 | .org kvm_ia64_ivt+0x1400 |
185 | ////////////////////////////////////////////////////////////////////// | 191 | ////////////////////////////////////////////////////////////////////// |
186 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) | 192 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) |
187 | ENTRY(kvm_nested_dtlb_miss) | 193 | ENTRY(kvm_nested_dtlb_miss) |
188 | KVM_FAULT(5) | 194 | KVM_FAULT(5) |
189 | END(kvm_nested_dtlb_miss) | 195 | END(kvm_nested_dtlb_miss) |
190 | 196 | ||
191 | .org kvm_ia64_ivt+0x1800 | 197 | .org kvm_ia64_ivt+0x1800 |
192 | ///////////////////////////////////////////////////////////////////// | 198 | ///////////////////////////////////////////////////////////////////// |
193 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) | 199 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) |
194 | ENTRY(kvm_ikey_miss) | 200 | ENTRY(kvm_ikey_miss) |
195 | KVM_REFLECT(6) | 201 | KVM_REFLECT(6) |
196 | END(kvm_ikey_miss) | 202 | END(kvm_ikey_miss) |
197 | 203 | ||
198 | .org kvm_ia64_ivt+0x1c00 | 204 | .org kvm_ia64_ivt+0x1c00 |
199 | ///////////////////////////////////////////////////////////////////// | 205 | ///////////////////////////////////////////////////////////////////// |
200 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | 206 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) |
201 | ENTRY(kvm_dkey_miss) | 207 | ENTRY(kvm_dkey_miss) |
202 | KVM_REFLECT(7) | 208 | KVM_REFLECT(7) |
203 | END(kvm_dkey_miss) | 209 | END(kvm_dkey_miss) |
204 | 210 | ||
205 | .org kvm_ia64_ivt+0x2000 | 211 | .org kvm_ia64_ivt+0x2000 |
206 | //////////////////////////////////////////////////////////////////// | 212 | //////////////////////////////////////////////////////////////////// |
207 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) | 213 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) |
208 | ENTRY(kvm_dirty_bit) | 214 | ENTRY(kvm_dirty_bit) |
209 | KVM_REFLECT(8) | 215 | KVM_REFLECT(8) |
210 | END(kvm_dirty_bit) | 216 | END(kvm_dirty_bit) |
211 | 217 | ||
212 | .org kvm_ia64_ivt+0x2400 | 218 | .org kvm_ia64_ivt+0x2400 |
213 | //////////////////////////////////////////////////////////////////// | 219 | //////////////////////////////////////////////////////////////////// |
214 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) | 220 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) |
215 | ENTRY(kvm_iaccess_bit) | 221 | ENTRY(kvm_iaccess_bit) |
216 | KVM_REFLECT(9) | 222 | KVM_REFLECT(9) |
217 | END(kvm_iaccess_bit) | 223 | END(kvm_iaccess_bit) |
218 | 224 | ||
219 | .org kvm_ia64_ivt+0x2800 | 225 | .org kvm_ia64_ivt+0x2800 |
220 | /////////////////////////////////////////////////////////////////// | 226 | /////////////////////////////////////////////////////////////////// |
221 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) | 227 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) |
222 | ENTRY(kvm_daccess_bit) | 228 | ENTRY(kvm_daccess_bit) |
223 | KVM_REFLECT(10) | 229 | KVM_REFLECT(10) |
224 | END(kvm_daccess_bit) | 230 | END(kvm_daccess_bit) |
225 | 231 | ||
226 | .org kvm_ia64_ivt+0x2c00 | 232 | .org kvm_ia64_ivt+0x2c00 |
227 | ///////////////////////////////////////////////////////////////// | 233 | ///////////////////////////////////////////////////////////////// |
228 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) | 234 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) |
229 | ENTRY(kvm_break_fault) | 235 | ENTRY(kvm_break_fault) |
230 | mov r31=pr | 236 | mov r31=pr |
231 | mov r19=11 | 237 | mov r19=11 |
232 | mov r29=cr.ipsr | 238 | mov r29=cr.ipsr |
233 | ;; | 239 | ;; |
234 | KVM_SAVE_MIN_WITH_COVER_R19 | 240 | KVM_SAVE_MIN_WITH_COVER_R19 |
235 | ;; | 241 | ;; |
236 | alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) | 242 | alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!) |
237 | mov out0=cr.ifa | 243 | mov out0=cr.ifa |
238 | mov out2=cr.isr // FIXME: pity to make this slow access twice | 244 | mov out2=cr.isr // FIXME: pity to make this slow access twice |
239 | mov out3=cr.iim // FIXME: pity to make this slow access twice | 245 | mov out3=cr.iim // FIXME: pity to make this slow access twice |
240 | adds r3=8,r2 // set up second base pointer | 246 | adds r3=8,r2 // set up second base pointer |
241 | ;; | 247 | ;; |
242 | ssm psr.ic | 248 | ssm psr.ic |
243 | ;; | 249 | ;; |
244 | srlz.i // guarantee that interruption collection is on | 250 | srlz.i // guarantee that interruption collection is on |
245 | ;; | 251 | ;; |
246 | //(p15)ssm psr.i // restore psr.i | 252 | //(p15)ssm psr.i // restore psr.i |
247 | addl r14=@gprel(ia64_leave_hypervisor),gp | 253 | addl r14=@gprel(ia64_leave_hypervisor),gp |
248 | ;; | 254 | ;; |
249 | KVM_SAVE_REST | 255 | KVM_SAVE_REST |
250 | mov rp=r14 | 256 | mov rp=r14 |
251 | ;; | 257 | ;; |
252 | adds out1=16,sp | 258 | adds out1=16,sp |
253 | br.call.sptk.many b6=kvm_ia64_handle_break | 259 | br.call.sptk.many b6=kvm_ia64_handle_break |
254 | ;; | 260 | ;; |
255 | END(kvm_break_fault) | 261 | END(kvm_break_fault) |
256 | 262 | ||
257 | .org kvm_ia64_ivt+0x3000 | 263 | .org kvm_ia64_ivt+0x3000 |
258 | ///////////////////////////////////////////////////////////////// | 264 | ///////////////////////////////////////////////////////////////// |
259 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | 265 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) |
260 | ENTRY(kvm_interrupt) | 266 | ENTRY(kvm_interrupt) |
261 | mov r31=pr // prepare to save predicates | 267 | mov r31=pr // prepare to save predicates |
262 | mov r19=12 | 268 | mov r19=12 |
263 | mov r29=cr.ipsr | 269 | mov r29=cr.ipsr |
264 | ;; | 270 | ;; |
265 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT | 271 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT |
266 | tbit.z p0,p15=r29,IA64_PSR_I_BIT | 272 | tbit.z p0,p15=r29,IA64_PSR_I_BIT |
267 | ;; | 273 | ;; |
268 | (p7) br.sptk kvm_dispatch_interrupt | 274 | (p7) br.sptk kvm_dispatch_interrupt |
269 | ;; | 275 | ;; |
270 | mov r27=ar.rsc /* M */ | 276 | mov r27=ar.rsc /* M */ |
271 | mov r20=r1 /* A */ | 277 | mov r20=r1 /* A */ |
272 | mov r25=ar.unat /* M */ | 278 | mov r25=ar.unat /* M */ |
273 | mov r26=ar.pfs /* I */ | 279 | mov r26=ar.pfs /* I */ |
274 | mov r28=cr.iip /* M */ | 280 | mov r28=cr.iip /* M */ |
275 | cover /* B (or nothing) */ | 281 | cover /* B (or nothing) */ |
276 | ;; | 282 | ;; |
277 | mov r1=sp | 283 | mov r1=sp |
278 | ;; | 284 | ;; |
279 | invala /* M */ | 285 | invala /* M */ |
280 | mov r30=cr.ifs | 286 | mov r30=cr.ifs |
281 | ;; | 287 | ;; |
282 | addl r1=-VMM_PT_REGS_SIZE,r1 | 288 | addl r1=-VMM_PT_REGS_SIZE,r1 |
283 | ;; | 289 | ;; |
284 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ | 290 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ |
285 | adds r16=PT(CR_IPSR),r1 | 291 | adds r16=PT(CR_IPSR),r1 |
286 | ;; | 292 | ;; |
287 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES | 293 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES |
288 | st8 [r16]=r29 /* save cr.ipsr */ | 294 | st8 [r16]=r29 /* save cr.ipsr */ |
289 | ;; | 295 | ;; |
290 | lfetch.fault.excl.nt1 [r17] | 296 | lfetch.fault.excl.nt1 [r17] |
291 | mov r29=b0 | 297 | mov r29=b0 |
292 | ;; | 298 | ;; |
293 | adds r16=PT(R8),r1 /* initialize first base pointer */ | 299 | adds r16=PT(R8),r1 /* initialize first base pointer */ |
294 | adds r17=PT(R9),r1 /* initialize second base pointer */ | 300 | adds r17=PT(R9),r1 /* initialize second base pointer */ |
295 | mov r18=r0 /* make sure r18 isn't NaT */ | 301 | mov r18=r0 /* make sure r18 isn't NaT */ |
296 | ;; | 302 | ;; |
297 | .mem.offset 0,0; st8.spill [r16]=r8,16 | 303 | .mem.offset 0,0; st8.spill [r16]=r8,16 |
298 | .mem.offset 8,0; st8.spill [r17]=r9,16 | 304 | .mem.offset 8,0; st8.spill [r17]=r9,16 |
299 | ;; | 305 | ;; |
300 | .mem.offset 0,0; st8.spill [r16]=r10,24 | 306 | .mem.offset 0,0; st8.spill [r16]=r10,24 |
301 | .mem.offset 8,0; st8.spill [r17]=r11,24 | 307 | .mem.offset 8,0; st8.spill [r17]=r11,24 |
302 | ;; | 308 | ;; |
303 | st8 [r16]=r28,16 /* save cr.iip */ | 309 | st8 [r16]=r28,16 /* save cr.iip */ |
304 | st8 [r17]=r30,16 /* save cr.ifs */ | 310 | st8 [r17]=r30,16 /* save cr.ifs */ |
305 | mov r8=ar.fpsr /* M */ | 311 | mov r8=ar.fpsr /* M */ |
306 | mov r9=ar.csd | 312 | mov r9=ar.csd |
307 | mov r10=ar.ssd | 313 | mov r10=ar.ssd |
308 | movl r11=FPSR_DEFAULT /* L-unit */ | 314 | movl r11=FPSR_DEFAULT /* L-unit */ |
309 | ;; | 315 | ;; |
310 | st8 [r16]=r25,16 /* save ar.unat */ | 316 | st8 [r16]=r25,16 /* save ar.unat */ |
311 | st8 [r17]=r26,16 /* save ar.pfs */ | 317 | st8 [r17]=r26,16 /* save ar.pfs */ |
312 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ | 318 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ |
313 | ;; | 319 | ;; |
314 | st8 [r16]=r27,16 /* save ar.rsc */ | 320 | st8 [r16]=r27,16 /* save ar.rsc */ |
315 | adds r17=16,r17 /* skip over ar_rnat field */ | 321 | adds r17=16,r17 /* skip over ar_rnat field */ |
316 | ;; | 322 | ;; |
317 | st8 [r17]=r31,16 /* save predicates */ | 323 | st8 [r17]=r31,16 /* save predicates */ |
318 | adds r16=16,r16 /* skip over ar_bspstore field */ | 324 | adds r16=16,r16 /* skip over ar_bspstore field */ |
319 | ;; | 325 | ;; |
320 | st8 [r16]=r29,16 /* save b0 */ | 326 | st8 [r16]=r29,16 /* save b0 */ |
321 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ | 327 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ |
322 | ;; | 328 | ;; |
323 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ | 329 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ |
324 | .mem.offset 8,0; st8.spill [r17]=r12,16 | 330 | .mem.offset 8,0; st8.spill [r17]=r12,16 |
325 | adds r12=-16,r1 | 331 | adds r12=-16,r1 |
326 | /* switch to kernel memory stack (with 16 bytes of scratch) */ | 332 | /* switch to kernel memory stack (with 16 bytes of scratch) */ |
327 | ;; | 333 | ;; |
328 | .mem.offset 0,0; st8.spill [r16]=r13,16 | 334 | .mem.offset 0,0; st8.spill [r16]=r13,16 |
329 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ | 335 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ |
330 | ;; | 336 | ;; |
331 | .mem.offset 0,0; st8.spill [r16]=r15,16 | 337 | .mem.offset 0,0; st8.spill [r16]=r15,16 |
332 | .mem.offset 8,0; st8.spill [r17]=r14,16 | 338 | .mem.offset 8,0; st8.spill [r17]=r14,16 |
333 | dep r14=-1,r0,60,4 | 339 | dep r14=-1,r0,60,4 |
334 | ;; | 340 | ;; |
335 | .mem.offset 0,0; st8.spill [r16]=r2,16 | 341 | .mem.offset 0,0; st8.spill [r16]=r2,16 |
336 | .mem.offset 8,0; st8.spill [r17]=r3,16 | 342 | .mem.offset 8,0; st8.spill [r17]=r3,16 |
337 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 | 343 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 |
338 | adds r14 = VMM_VCPU_GP_OFFSET,r13 | 344 | adds r14 = VMM_VCPU_GP_OFFSET,r13 |
339 | ;; | 345 | ;; |
340 | mov r8=ar.ccv | 346 | mov r8=ar.ccv |
341 | ld8 r14 = [r14] | 347 | ld8 r14 = [r14] |
342 | ;; | 348 | ;; |
343 | mov r1=r14 /* establish kernel global pointer */ | 349 | mov r1=r14 /* establish kernel global pointer */ |
344 | ;; \ | 350 | ;; \ |
345 | bsw.1 | 351 | bsw.1 |
346 | ;; | 352 | ;; |
347 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | 353 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
348 | mov out0=r13 | 354 | mov out0=r13 |
349 | ;; | 355 | ;; |
350 | ssm psr.ic | 356 | ssm psr.ic |
351 | ;; | 357 | ;; |
352 | srlz.i | 358 | srlz.i |
353 | ;; | 359 | ;; |
354 | //(p15) ssm psr.i | 360 | //(p15) ssm psr.i |
355 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 361 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
356 | srlz.i // ensure everybody knows psr.ic is back on | 362 | srlz.i // ensure everybody knows psr.ic is back on |
357 | ;; | 363 | ;; |
358 | .mem.offset 0,0; st8.spill [r2]=r16,16 | 364 | .mem.offset 0,0; st8.spill [r2]=r16,16 |
359 | .mem.offset 8,0; st8.spill [r3]=r17,16 | 365 | .mem.offset 8,0; st8.spill [r3]=r17,16 |
360 | ;; | 366 | ;; |
361 | .mem.offset 0,0; st8.spill [r2]=r18,16 | 367 | .mem.offset 0,0; st8.spill [r2]=r18,16 |
362 | .mem.offset 8,0; st8.spill [r3]=r19,16 | 368 | .mem.offset 8,0; st8.spill [r3]=r19,16 |
363 | ;; | 369 | ;; |
364 | .mem.offset 0,0; st8.spill [r2]=r20,16 | 370 | .mem.offset 0,0; st8.spill [r2]=r20,16 |
365 | .mem.offset 8,0; st8.spill [r3]=r21,16 | 371 | .mem.offset 8,0; st8.spill [r3]=r21,16 |
366 | mov r18=b6 | 372 | mov r18=b6 |
367 | ;; | 373 | ;; |
368 | .mem.offset 0,0; st8.spill [r2]=r22,16 | 374 | .mem.offset 0,0; st8.spill [r2]=r22,16 |
369 | .mem.offset 8,0; st8.spill [r3]=r23,16 | 375 | .mem.offset 8,0; st8.spill [r3]=r23,16 |
370 | mov r19=b7 | 376 | mov r19=b7 |
371 | ;; | 377 | ;; |
372 | .mem.offset 0,0; st8.spill [r2]=r24,16 | 378 | .mem.offset 0,0; st8.spill [r2]=r24,16 |
373 | .mem.offset 8,0; st8.spill [r3]=r25,16 | 379 | .mem.offset 8,0; st8.spill [r3]=r25,16 |
374 | ;; | 380 | ;; |
375 | .mem.offset 0,0; st8.spill [r2]=r26,16 | 381 | .mem.offset 0,0; st8.spill [r2]=r26,16 |
376 | .mem.offset 8,0; st8.spill [r3]=r27,16 | 382 | .mem.offset 8,0; st8.spill [r3]=r27,16 |
377 | ;; | 383 | ;; |
378 | .mem.offset 0,0; st8.spill [r2]=r28,16 | 384 | .mem.offset 0,0; st8.spill [r2]=r28,16 |
379 | .mem.offset 8,0; st8.spill [r3]=r29,16 | 385 | .mem.offset 8,0; st8.spill [r3]=r29,16 |
380 | ;; | 386 | ;; |
381 | .mem.offset 0,0; st8.spill [r2]=r30,16 | 387 | .mem.offset 0,0; st8.spill [r2]=r30,16 |
382 | .mem.offset 8,0; st8.spill [r3]=r31,32 | 388 | .mem.offset 8,0; st8.spill [r3]=r31,32 |
383 | ;; | 389 | ;; |
384 | mov ar.fpsr=r11 /* M-unit */ | 390 | mov ar.fpsr=r11 /* M-unit */ |
385 | st8 [r2]=r8,8 /* ar.ccv */ | 391 | st8 [r2]=r8,8 /* ar.ccv */ |
386 | adds r24=PT(B6)-PT(F7),r3 | 392 | adds r24=PT(B6)-PT(F7),r3 |
387 | ;; | 393 | ;; |
388 | stf.spill [r2]=f6,32 | 394 | stf.spill [r2]=f6,32 |
389 | stf.spill [r3]=f7,32 | 395 | stf.spill [r3]=f7,32 |
390 | ;; | 396 | ;; |
391 | stf.spill [r2]=f8,32 | 397 | stf.spill [r2]=f8,32 |
392 | stf.spill [r3]=f9,32 | 398 | stf.spill [r3]=f9,32 |
393 | ;; | 399 | ;; |
394 | stf.spill [r2]=f10 | 400 | stf.spill [r2]=f10 |
395 | stf.spill [r3]=f11 | 401 | stf.spill [r3]=f11 |
396 | adds r25=PT(B7)-PT(F11),r3 | 402 | adds r25=PT(B7)-PT(F11),r3 |
397 | ;; | 403 | ;; |
398 | st8 [r24]=r18,16 /* b6 */ | 404 | st8 [r24]=r18,16 /* b6 */ |
399 | st8 [r25]=r19,16 /* b7 */ | 405 | st8 [r25]=r19,16 /* b7 */ |
400 | ;; | 406 | ;; |
401 | st8 [r24]=r9 /* ar.csd */ | 407 | st8 [r24]=r9 /* ar.csd */ |
402 | st8 [r25]=r10 /* ar.ssd */ | 408 | st8 [r25]=r10 /* ar.ssd */ |
403 | ;; | 409 | ;; |
404 | srlz.d // make sure we see the effect of cr.ivr | 410 | srlz.d // make sure we see the effect of cr.ivr |
405 | addl r14=@gprel(ia64_leave_nested),gp | 411 | addl r14=@gprel(ia64_leave_nested),gp |
406 | ;; | 412 | ;; |
407 | mov rp=r14 | 413 | mov rp=r14 |
408 | br.call.sptk.many b6=kvm_ia64_handle_irq | 414 | br.call.sptk.many b6=kvm_ia64_handle_irq |
409 | ;; | 415 | ;; |
410 | END(kvm_interrupt) | 416 | END(kvm_interrupt) |
411 | 417 | ||
412 | .global kvm_dispatch_vexirq | 418 | .global kvm_dispatch_vexirq |
@@ -414,387 +420,385 @@ END(kvm_interrupt)
414 | ////////////////////////////////////////////////////////////////////// | 420 | ////////////////////////////////////////////////////////////////////// |
415 | // 0x3400 Entry 13 (size 64 bundles) Reserved | 421 | // 0x3400 Entry 13 (size 64 bundles) Reserved |
416 | ENTRY(kvm_virtual_exirq) | 422 | ENTRY(kvm_virtual_exirq) |
417 | mov r31=pr | 423 | mov r31=pr |
418 | mov r19=13 | 424 | mov r19=13 |
419 | mov r30 =r0 | 425 | mov r30 =r0 |
420 | ;; | 426 | ;; |
421 | kvm_dispatch_vexirq: | 427 | kvm_dispatch_vexirq: |
422 | cmp.eq p6,p0 = 1,r30 | 428 | cmp.eq p6,p0 = 1,r30 |
423 | ;; | 429 | ;; |
424 | (p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 430 | (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
425 | ;; | 431 | ;; |
426 | (p6)ld8 r1 = [r29] | 432 | (p6) ld8 r1 = [r29] |
427 | ;; | 433 | ;; |
428 | KVM_SAVE_MIN_WITH_COVER_R19 | 434 | KVM_SAVE_MIN_WITH_COVER_R19 |
429 | alloc r14=ar.pfs,0,0,1,0 | 435 | alloc r14=ar.pfs,0,0,1,0 |
430 | mov out0=r13 | 436 | mov out0=r13 |
431 | 437 | ||
432 | ssm psr.ic | 438 | ssm psr.ic |
433 | ;; | 439 | ;; |
434 | srlz.i // guarantee that interruption collection is on | 440 | srlz.i // guarantee that interruption collection is on |
435 | ;; | 441 | ;; |
436 | //(p15) ssm psr.i // restore psr.i | 442 | //(p15) ssm psr.i // restore psr.i |
437 | adds r3=8,r2 // set up second base pointer | 443 | adds r3=8,r2 // set up second base pointer |
438 | ;; | 444 | ;; |
439 | KVM_SAVE_REST | 445 | KVM_SAVE_REST |
440 | addl r14=@gprel(ia64_leave_hypervisor),gp | 446 | addl r14=@gprel(ia64_leave_hypervisor),gp |
441 | ;; | 447 | ;; |
442 | mov rp=r14 | 448 | mov rp=r14 |
443 | br.call.sptk.many b6=kvm_vexirq | 449 | br.call.sptk.many b6=kvm_vexirq |
444 | END(kvm_virtual_exirq) | 450 | END(kvm_virtual_exirq) |
445 | 451 | ||
446 | .org kvm_ia64_ivt+0x3800 | 452 | .org kvm_ia64_ivt+0x3800 |
447 | ///////////////////////////////////////////////////////////////////// | 453 | ///////////////////////////////////////////////////////////////////// |
448 | // 0x3800 Entry 14 (size 64 bundles) Reserved | 454 | // 0x3800 Entry 14 (size 64 bundles) Reserved |
449 | KVM_FAULT(14) | 455 | KVM_FAULT(14) |
450 | // this code segment is from 2.6.16.13 | 456 | // this code segment is from 2.6.16.13 |
451 | |||
452 | 457 | ||
453 | .org kvm_ia64_ivt+0x3c00 | 458 | .org kvm_ia64_ivt+0x3c00 |
454 | /////////////////////////////////////////////////////////////////////// | 459 | /////////////////////////////////////////////////////////////////////// |
455 | // 0x3c00 Entry 15 (size 64 bundles) Reserved | 460 | // 0x3c00 Entry 15 (size 64 bundles) Reserved |
456 | KVM_FAULT(15) | 461 | KVM_FAULT(15) |
457 | |||
458 | 462 | ||
459 | .org kvm_ia64_ivt+0x4000 | 463 | .org kvm_ia64_ivt+0x4000 |
460 | /////////////////////////////////////////////////////////////////////// | 464 | /////////////////////////////////////////////////////////////////////// |
461 | // 0x4000 Entry 16 (size 64 bundles) Reserved | 465 | // 0x4000 Entry 16 (size 64 bundles) Reserved |
462 | KVM_FAULT(16) | 466 | KVM_FAULT(16) |
463 | 467 | ||
464 | .org kvm_ia64_ivt+0x4400 | 468 | .org kvm_ia64_ivt+0x4400 |
465 | ////////////////////////////////////////////////////////////////////// | 469 | ////////////////////////////////////////////////////////////////////// |
466 | // 0x4400 Entry 17 (size 64 bundles) Reserved | 470 | // 0x4400 Entry 17 (size 64 bundles) Reserved |
467 | KVM_FAULT(17) | 471 | KVM_FAULT(17) |
468 | 472 | ||
469 | .org kvm_ia64_ivt+0x4800 | 473 | .org kvm_ia64_ivt+0x4800 |
470 | ////////////////////////////////////////////////////////////////////// | 474 | ////////////////////////////////////////////////////////////////////// |
471 | // 0x4800 Entry 18 (size 64 bundles) Reserved | 475 | // 0x4800 Entry 18 (size 64 bundles) Reserved |
472 | KVM_FAULT(18) | 476 | KVM_FAULT(18) |
473 | 477 | ||
474 | .org kvm_ia64_ivt+0x4c00 | 478 | .org kvm_ia64_ivt+0x4c00 |
475 | ////////////////////////////////////////////////////////////////////// | 479 | ////////////////////////////////////////////////////////////////////// |
476 | // 0x4c00 Entry 19 (size 64 bundles) Reserved | 480 | // 0x4c00 Entry 19 (size 64 bundles) Reserved |
477 | KVM_FAULT(19) | 481 | KVM_FAULT(19) |
478 | 482 | ||
479 | .org kvm_ia64_ivt+0x5000 | 483 | .org kvm_ia64_ivt+0x5000 |
480 | ////////////////////////////////////////////////////////////////////// | 484 | ////////////////////////////////////////////////////////////////////// |
481 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present | 485 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present |
482 | ENTRY(kvm_page_not_present) | 486 | ENTRY(kvm_page_not_present) |
483 | KVM_REFLECT(20) | 487 | KVM_REFLECT(20) |
484 | END(kvm_page_not_present) | 488 | END(kvm_page_not_present) |
485 | 489 | ||
486 | .org kvm_ia64_ivt+0x5100 | 490 | .org kvm_ia64_ivt+0x5100 |
487 | /////////////////////////////////////////////////////////////////////// | 491 | /////////////////////////////////////////////////////////////////////// |
488 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector | 492 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector |
489 | ENTRY(kvm_key_permission) | 493 | ENTRY(kvm_key_permission) |
490 | KVM_REFLECT(21) | 494 | KVM_REFLECT(21) |
491 | END(kvm_key_permission) | 495 | END(kvm_key_permission) |
492 | 496 | ||
493 | .org kvm_ia64_ivt+0x5200 | 497 | .org kvm_ia64_ivt+0x5200 |
494 | ////////////////////////////////////////////////////////////////////// | 498 | ////////////////////////////////////////////////////////////////////// |
495 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | 499 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) |
496 | ENTRY(kvm_iaccess_rights) | 500 | ENTRY(kvm_iaccess_rights) |
497 | KVM_REFLECT(22) | 501 | KVM_REFLECT(22) |
498 | END(kvm_iaccess_rights) | 502 | END(kvm_iaccess_rights) |
499 | 503 | ||
500 | .org kvm_ia64_ivt+0x5300 | 504 | .org kvm_ia64_ivt+0x5300 |
501 | ////////////////////////////////////////////////////////////////////// | 505 | ////////////////////////////////////////////////////////////////////// |
502 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | 506 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) |
503 | ENTRY(kvm_daccess_rights) | 507 | ENTRY(kvm_daccess_rights) |
504 | KVM_REFLECT(23) | 508 | KVM_REFLECT(23) |
505 | END(kvm_daccess_rights) | 509 | END(kvm_daccess_rights) |
506 | 510 | ||
507 | .org kvm_ia64_ivt+0x5400 | 511 | .org kvm_ia64_ivt+0x5400 |
508 | ///////////////////////////////////////////////////////////////////// | 512 | ///////////////////////////////////////////////////////////////////// |
509 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | 513 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) |
510 | ENTRY(kvm_general_exception) | 514 | ENTRY(kvm_general_exception) |
511 | KVM_REFLECT(24) | 515 | KVM_REFLECT(24) |
512 | KVM_FAULT(24) | 516 | KVM_FAULT(24) |
513 | END(kvm_general_exception) | 517 | END(kvm_general_exception) |
514 | 518 | ||
515 | .org kvm_ia64_ivt+0x5500 | 519 | .org kvm_ia64_ivt+0x5500 |
516 | ////////////////////////////////////////////////////////////////////// | 520 | ////////////////////////////////////////////////////////////////////// |
517 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) | 521 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) |
518 | ENTRY(kvm_disabled_fp_reg) | 522 | ENTRY(kvm_disabled_fp_reg) |
519 | KVM_REFLECT(25) | 523 | KVM_REFLECT(25) |
520 | END(kvm_disabled_fp_reg) | 524 | END(kvm_disabled_fp_reg) |
521 | 525 | ||
522 | .org kvm_ia64_ivt+0x5600 | 526 | .org kvm_ia64_ivt+0x5600 |
523 | //////////////////////////////////////////////////////////////////// | 527 | //////////////////////////////////////////////////////////////////// |
524 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) | 528 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) |
525 | ENTRY(kvm_nat_consumption) | 529 | ENTRY(kvm_nat_consumption) |
526 | KVM_REFLECT(26) | 530 | KVM_REFLECT(26) |
527 | END(kvm_nat_consumption) | 531 | END(kvm_nat_consumption) |
528 | 532 | ||
529 | .org kvm_ia64_ivt+0x5700 | 533 | .org kvm_ia64_ivt+0x5700 |
530 | ///////////////////////////////////////////////////////////////////// | 534 | ///////////////////////////////////////////////////////////////////// |
531 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) | 535 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) |
532 | ENTRY(kvm_speculation_vector) | 536 | ENTRY(kvm_speculation_vector) |
533 | KVM_REFLECT(27) | 537 | KVM_REFLECT(27) |
534 | END(kvm_speculation_vector) | 538 | END(kvm_speculation_vector) |
535 | 539 | ||
536 | .org kvm_ia64_ivt+0x5800 | 540 | .org kvm_ia64_ivt+0x5800 |
537 | ///////////////////////////////////////////////////////////////////// | 541 | ///////////////////////////////////////////////////////////////////// |
538 | // 0x5800 Entry 28 (size 16 bundles) Reserved | 542 | // 0x5800 Entry 28 (size 16 bundles) Reserved |
539 | KVM_FAULT(28) | 543 | KVM_FAULT(28) |
540 | 544 | ||
541 | .org kvm_ia64_ivt+0x5900 | 545 | .org kvm_ia64_ivt+0x5900 |
542 | /////////////////////////////////////////////////////////////////// | 546 | /////////////////////////////////////////////////////////////////// |
543 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) | 547 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) |
544 | ENTRY(kvm_debug_vector) | 548 | ENTRY(kvm_debug_vector) |
545 | KVM_FAULT(29) | 549 | KVM_FAULT(29) |
546 | END(kvm_debug_vector) | 550 | END(kvm_debug_vector) |
547 | 551 | ||
548 | .org kvm_ia64_ivt+0x5a00 | 552 | .org kvm_ia64_ivt+0x5a00 |
549 | /////////////////////////////////////////////////////////////// | 553 | /////////////////////////////////////////////////////////////// |
550 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) | 554 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) |
551 | ENTRY(kvm_unaligned_access) | 555 | ENTRY(kvm_unaligned_access) |
552 | KVM_REFLECT(30) | 556 | KVM_REFLECT(30) |
553 | END(kvm_unaligned_access) | 557 | END(kvm_unaligned_access) |
554 | 558 | ||
555 | .org kvm_ia64_ivt+0x5b00 | 559 | .org kvm_ia64_ivt+0x5b00 |
556 | ////////////////////////////////////////////////////////////////////// | 560 | ////////////////////////////////////////////////////////////////////// |
557 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) | 561 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) |
558 | ENTRY(kvm_unsupported_data_reference) | 562 | ENTRY(kvm_unsupported_data_reference) |
559 | KVM_REFLECT(31) | 563 | KVM_REFLECT(31) |
560 | END(kvm_unsupported_data_reference) | 564 | END(kvm_unsupported_data_reference) |
561 | 565 | ||
562 | .org kvm_ia64_ivt+0x5c00 | 566 | .org kvm_ia64_ivt+0x5c00 |
563 | //////////////////////////////////////////////////////////////////// | 567 | //////////////////////////////////////////////////////////////////// |
564 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) | 568 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) |
565 | ENTRY(kvm_floating_point_fault) | 569 | ENTRY(kvm_floating_point_fault) |
566 | KVM_REFLECT(32) | 570 | KVM_REFLECT(32) |
567 | END(kvm_floating_point_fault) | 571 | END(kvm_floating_point_fault) |
568 | 572 | ||
569 | .org kvm_ia64_ivt+0x5d00 | 573 | .org kvm_ia64_ivt+0x5d00 |
570 | ///////////////////////////////////////////////////////////////////// | 574 | ///////////////////////////////////////////////////////////////////// |
571 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) | 575 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) |
572 | ENTRY(kvm_floating_point_trap) | 576 | ENTRY(kvm_floating_point_trap) |
573 | KVM_REFLECT(33) | 577 | KVM_REFLECT(33) |
574 | END(kvm_floating_point_trap) | 578 | END(kvm_floating_point_trap) |
575 | 579 | ||
576 | .org kvm_ia64_ivt+0x5e00 | 580 | .org kvm_ia64_ivt+0x5e00 |
577 | ////////////////////////////////////////////////////////////////////// | 581 | ////////////////////////////////////////////////////////////////////// |
578 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) | 582 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) |
579 | ENTRY(kvm_lower_privilege_trap) | 583 | ENTRY(kvm_lower_privilege_trap) |
580 | KVM_REFLECT(34) | 584 | KVM_REFLECT(34) |
581 | END(kvm_lower_privilege_trap) | 585 | END(kvm_lower_privilege_trap) |
582 | 586 | ||
583 | .org kvm_ia64_ivt+0x5f00 | 587 | .org kvm_ia64_ivt+0x5f00 |
584 | ////////////////////////////////////////////////////////////////////// | 588 | ////////////////////////////////////////////////////////////////////// |
585 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) | 589 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) |
586 | ENTRY(kvm_taken_branch_trap) | 590 | ENTRY(kvm_taken_branch_trap) |
587 | KVM_REFLECT(35) | 591 | KVM_REFLECT(35) |
588 | END(kvm_taken_branch_trap) | 592 | END(kvm_taken_branch_trap) |
589 | 593 | ||
590 | .org kvm_ia64_ivt+0x6000 | 594 | .org kvm_ia64_ivt+0x6000 |
591 | //////////////////////////////////////////////////////////////////// | 595 | //////////////////////////////////////////////////////////////////// |
592 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) | 596 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) |
593 | ENTRY(kvm_single_step_trap) | 597 | ENTRY(kvm_single_step_trap) |
594 | KVM_REFLECT(36) | 598 | KVM_REFLECT(36) |
595 | END(kvm_single_step_trap) | 599 | END(kvm_single_step_trap) |
596 | .global kvm_virtualization_fault_back | 600 | .global kvm_virtualization_fault_back |
597 | .org kvm_ia64_ivt+0x6100 | 601 | .org kvm_ia64_ivt+0x6100 |
598 | ///////////////////////////////////////////////////////////////////// | 602 | ///////////////////////////////////////////////////////////////////// |
599 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault | 603 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault |
600 | ENTRY(kvm_virtualization_fault) | 604 | ENTRY(kvm_virtualization_fault) |
601 | mov r31=pr | 605 | mov r31=pr |
602 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 606 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
603 | ;; | 607 | ;; |
604 | st8 [r16] = r1 | 608 | st8 [r16] = r1 |
605 | adds r17 = VMM_VCPU_GP_OFFSET, r21 | 609 | adds r17 = VMM_VCPU_GP_OFFSET, r21 |
606 | ;; | 610 | ;; |
607 | ld8 r1 = [r17] | 611 | ld8 r1 = [r17] |
608 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 | 612 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 |
609 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 | 613 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 |
610 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 | 614 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 |
611 | cmp.eq p9,p0=EVENT_RSM,r24 | 615 | cmp.eq p9,p0=EVENT_RSM,r24 |
612 | cmp.eq p10,p0=EVENT_SSM,r24 | 616 | cmp.eq p10,p0=EVENT_SSM,r24 |
613 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 | 617 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 |
614 | cmp.eq p12,p0=EVENT_THASH,r24 | 618 | cmp.eq p12,p0=EVENT_THASH,r24 |
615 | (p6) br.dptk.many kvm_asm_mov_from_ar | 619 | (p6) br.dptk.many kvm_asm_mov_from_ar |
616 | (p7) br.dptk.many kvm_asm_mov_from_rr | 620 | (p7) br.dptk.many kvm_asm_mov_from_rr |
617 | (p8) br.dptk.many kvm_asm_mov_to_rr | 621 | (p8) br.dptk.many kvm_asm_mov_to_rr |
618 | (p9) br.dptk.many kvm_asm_rsm | 622 | (p9) br.dptk.many kvm_asm_rsm |
619 | (p10) br.dptk.many kvm_asm_ssm | 623 | (p10) br.dptk.many kvm_asm_ssm |
620 | (p11) br.dptk.many kvm_asm_mov_to_psr | 624 | (p11) br.dptk.many kvm_asm_mov_to_psr |
621 | (p12) br.dptk.many kvm_asm_thash | 625 | (p12) br.dptk.many kvm_asm_thash |
622 | ;; | 626 | ;; |
623 | kvm_virtualization_fault_back: | 627 | kvm_virtualization_fault_back: |
624 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 628 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
625 | ;; | 629 | ;; |
626 | ld8 r1 = [r16] | 630 | ld8 r1 = [r16] |
627 | ;; | 631 | ;; |
628 | mov r19=37 | 632 | mov r19=37 |
629 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | 633 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 |
630 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | 634 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 |
631 | ;; | 635 | ;; |
632 | st8 [r16] = r24 | 636 | st8 [r16] = r24 |
633 | st8 [r17] = r25 | 637 | st8 [r17] = r25 |
634 | ;; | 638 | ;; |
635 | cmp.ne p6,p0=EVENT_RFI, r24 | 639 | cmp.ne p6,p0=EVENT_RFI, r24 |
636 | (p6) br.sptk kvm_dispatch_virtualization_fault | 640 | (p6) br.sptk kvm_dispatch_virtualization_fault |
637 | ;; | 641 | ;; |
638 | adds r18=VMM_VPD_BASE_OFFSET,r21 | 642 | adds r18=VMM_VPD_BASE_OFFSET,r21 |
639 | ;; | 643 | ;; |
640 | ld8 r18=[r18] | 644 | ld8 r18=[r18] |
641 | ;; | 645 | ;; |
642 | adds r18=VMM_VPD_VIFS_OFFSET,r18 | 646 | adds r18=VMM_VPD_VIFS_OFFSET,r18 |
643 | ;; | 647 | ;; |
644 | ld8 r18=[r18] | 648 | ld8 r18=[r18] |
645 | ;; | 649 | ;; |
646 | tbit.z p6,p0=r18,63 | 650 | tbit.z p6,p0=r18,63 |
647 | (p6) br.sptk kvm_dispatch_virtualization_fault | 651 | (p6) br.sptk kvm_dispatch_virtualization_fault |
648 | ;; | 652 | ;; |
649 | //if vifs.v=1 desert current register frame | 653 | //if vifs.v=1 desert current register frame |
650 | alloc r18=ar.pfs,0,0,0,0 | 654 | alloc r18=ar.pfs,0,0,0,0 |
651 | br.sptk kvm_dispatch_virtualization_fault | 655 | br.sptk kvm_dispatch_virtualization_fault |
652 | END(kvm_virtualization_fault) | 656 | END(kvm_virtualization_fault) |
653 | 657 | ||
654 | .org kvm_ia64_ivt+0x6200 | 658 | .org kvm_ia64_ivt+0x6200 |
655 | ////////////////////////////////////////////////////////////// | 659 | ////////////////////////////////////////////////////////////// |
656 | // 0x6200 Entry 38 (size 16 bundles) Reserved | 660 | // 0x6200 Entry 38 (size 16 bundles) Reserved |
657 | KVM_FAULT(38) | 661 | KVM_FAULT(38) |
658 | 662 | ||
659 | .org kvm_ia64_ivt+0x6300 | 663 | .org kvm_ia64_ivt+0x6300 |
660 | ///////////////////////////////////////////////////////////////// | 664 | ///////////////////////////////////////////////////////////////// |
661 | // 0x6300 Entry 39 (size 16 bundles) Reserved | 665 | // 0x6300 Entry 39 (size 16 bundles) Reserved |
662 | KVM_FAULT(39) | 666 | KVM_FAULT(39) |
663 | 667 | ||
664 | .org kvm_ia64_ivt+0x6400 | 668 | .org kvm_ia64_ivt+0x6400 |
665 | ///////////////////////////////////////////////////////////////// | 669 | ///////////////////////////////////////////////////////////////// |
666 | // 0x6400 Entry 40 (size 16 bundles) Reserved | 670 | // 0x6400 Entry 40 (size 16 bundles) Reserved |
667 | KVM_FAULT(40) | 671 | KVM_FAULT(40) |
668 | 672 | ||
669 | .org kvm_ia64_ivt+0x6500 | 673 | .org kvm_ia64_ivt+0x6500 |
670 | ////////////////////////////////////////////////////////////////// | 674 | ////////////////////////////////////////////////////////////////// |
671 | // 0x6500 Entry 41 (size 16 bundles) Reserved | 675 | // 0x6500 Entry 41 (size 16 bundles) Reserved |
672 | KVM_FAULT(41) | 676 | KVM_FAULT(41) |
673 | 677 | ||
674 | .org kvm_ia64_ivt+0x6600 | 678 | .org kvm_ia64_ivt+0x6600 |
675 | ////////////////////////////////////////////////////////////////// | 679 | ////////////////////////////////////////////////////////////////// |
676 | // 0x6600 Entry 42 (size 16 bundles) Reserved | 680 | // 0x6600 Entry 42 (size 16 bundles) Reserved |
677 | KVM_FAULT(42) | 681 | KVM_FAULT(42) |
678 | 682 | ||
679 | .org kvm_ia64_ivt+0x6700 | 683 | .org kvm_ia64_ivt+0x6700 |
680 | ////////////////////////////////////////////////////////////////// | 684 | ////////////////////////////////////////////////////////////////// |
681 | // 0x6700 Entry 43 (size 16 bundles) Reserved | 685 | // 0x6700 Entry 43 (size 16 bundles) Reserved |
682 | KVM_FAULT(43) | 686 | KVM_FAULT(43) |
683 | 687 | ||
684 | .org kvm_ia64_ivt+0x6800 | 688 | .org kvm_ia64_ivt+0x6800 |
685 | ////////////////////////////////////////////////////////////////// | 689 | ////////////////////////////////////////////////////////////////// |
686 | // 0x6800 Entry 44 (size 16 bundles) Reserved | 690 | // 0x6800 Entry 44 (size 16 bundles) Reserved |
687 | KVM_FAULT(44) | 691 | KVM_FAULT(44) |
688 | 692 | ||
689 | .org kvm_ia64_ivt+0x6900 | 693 | .org kvm_ia64_ivt+0x6900 |
690 | /////////////////////////////////////////////////////////////////// | 694 | /////////////////////////////////////////////////////////////////// |
691 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception | 695 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception |
692 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) | 696 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) |
693 | ENTRY(kvm_ia32_exception) | 697 | ENTRY(kvm_ia32_exception) |
694 | KVM_FAULT(45) | 698 | KVM_FAULT(45) |
695 | END(kvm_ia32_exception) | 699 | END(kvm_ia32_exception) |
696 | 700 | ||
697 | .org kvm_ia64_ivt+0x6a00 | 701 | .org kvm_ia64_ivt+0x6a00 |
698 | //////////////////////////////////////////////////////////////////// | 702 | //////////////////////////////////////////////////////////////////// |
699 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | 703 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) |
700 | ENTRY(kvm_ia32_intercept) | 704 | ENTRY(kvm_ia32_intercept) |
701 | KVM_FAULT(47) | 705 | KVM_FAULT(47) |
702 | END(kvm_ia32_intercept) | 706 | END(kvm_ia32_intercept) |
703 | 707 | ||
704 | .org kvm_ia64_ivt+0x6c00 | 708 | .org kvm_ia64_ivt+0x6c00 |
705 | ///////////////////////////////////////////////////////////////////// | 709 | ///////////////////////////////////////////////////////////////////// |
706 | // 0x6c00 Entry 48 (size 16 bundles) Reserved | 710 | // 0x6c00 Entry 48 (size 16 bundles) Reserved |
707 | KVM_FAULT(48) | 711 | KVM_FAULT(48) |
708 | 712 | ||
709 | .org kvm_ia64_ivt+0x6d00 | 713 | .org kvm_ia64_ivt+0x6d00 |
710 | ////////////////////////////////////////////////////////////////////// | 714 | ////////////////////////////////////////////////////////////////////// |
711 | // 0x6d00 Entry 49 (size 16 bundles) Reserved | 715 | // 0x6d00 Entry 49 (size 16 bundles) Reserved |
712 | KVM_FAULT(49) | 716 | KVM_FAULT(49) |
713 | 717 | ||
714 | .org kvm_ia64_ivt+0x6e00 | 718 | .org kvm_ia64_ivt+0x6e00 |
715 | ////////////////////////////////////////////////////////////////////// | 719 | ////////////////////////////////////////////////////////////////////// |
716 | // 0x6e00 Entry 50 (size 16 bundles) Reserved | 720 | // 0x6e00 Entry 50 (size 16 bundles) Reserved |
717 | KVM_FAULT(50) | 721 | KVM_FAULT(50) |
718 | 722 | ||
719 | .org kvm_ia64_ivt+0x6f00 | 723 | .org kvm_ia64_ivt+0x6f00 |
720 | ///////////////////////////////////////////////////////////////////// | 724 | ///////////////////////////////////////////////////////////////////// |
721 | // 0x6f00 Entry 51 (size 16 bundles) Reserved | 725 | // 0x6f00 Entry 51 (size 16 bundles) Reserved |
722 | KVM_FAULT(52) | 726 | KVM_FAULT(52) |
723 | 727 | ||
724 | .org kvm_ia64_ivt+0x7100 | 728 | .org kvm_ia64_ivt+0x7100 |
725 | //////////////////////////////////////////////////////////////////// | 729 | //////////////////////////////////////////////////////////////////// |
726 | // 0x7100 Entry 53 (size 16 bundles) Reserved | 730 | // 0x7100 Entry 53 (size 16 bundles) Reserved |
727 | KVM_FAULT(53) | 731 | KVM_FAULT(53) |
728 | 732 | ||
729 | .org kvm_ia64_ivt+0x7200 | 733 | .org kvm_ia64_ivt+0x7200 |
730 | ///////////////////////////////////////////////////////////////////// | 734 | ///////////////////////////////////////////////////////////////////// |
731 | // 0x7200 Entry 54 (size 16 bundles) Reserved | 735 | // 0x7200 Entry 54 (size 16 bundles) Reserved |
732 | KVM_FAULT(54) | 736 | KVM_FAULT(54) |
733 | 737 | ||
734 | .org kvm_ia64_ivt+0x7300 | 738 | .org kvm_ia64_ivt+0x7300 |
735 | //////////////////////////////////////////////////////////////////// | 739 | //////////////////////////////////////////////////////////////////// |
736 | // 0x7300 Entry 55 (size 16 bundles) Reserved | 740 | // 0x7300 Entry 55 (size 16 bundles) Reserved |
737 | KVM_FAULT(55) | 741 | KVM_FAULT(55) |
738 | 742 | ||
739 | .org kvm_ia64_ivt+0x7400 | 743 | .org kvm_ia64_ivt+0x7400 |
740 | //////////////////////////////////////////////////////////////////// | 744 | //////////////////////////////////////////////////////////////////// |
741 | // 0x7400 Entry 56 (size 16 bundles) Reserved | 745 | // 0x7400 Entry 56 (size 16 bundles) Reserved |
742 | KVM_FAULT(56) | 746 | KVM_FAULT(56) |
743 | 747 | ||
744 | .org kvm_ia64_ivt+0x7500 | 748 | .org kvm_ia64_ivt+0x7500 |
745 | ///////////////////////////////////////////////////////////////////// | 749 | ///////////////////////////////////////////////////////////////////// |
746 | // 0x7500 Entry 57 (size 16 bundles) Reserved | 750 | // 0x7500 Entry 57 (size 16 bundles) Reserved |
747 | KVM_FAULT(57) | 751 | KVM_FAULT(57) |
748 | 752 | ||
749 | .org kvm_ia64_ivt+0x7600 | 753 | .org kvm_ia64_ivt+0x7600 |
750 | ///////////////////////////////////////////////////////////////////// | 754 | ///////////////////////////////////////////////////////////////////// |
751 | // 0x7600 Entry 58 (size 16 bundles) Reserved | 755 | // 0x7600 Entry 58 (size 16 bundles) Reserved |
752 | KVM_FAULT(58) | 756 | KVM_FAULT(58) |
753 | 757 | ||
754 | .org kvm_ia64_ivt+0x7700 | 758 | .org kvm_ia64_ivt+0x7700 |
755 | //////////////////////////////////////////////////////////////////// | 759 | //////////////////////////////////////////////////////////////////// |
756 | // 0x7700 Entry 59 (size 16 bundles) Reserved | 760 | // 0x7700 Entry 59 (size 16 bundles) Reserved |
757 | KVM_FAULT(59) | 761 | KVM_FAULT(59) |
758 | 762 | ||
759 | .org kvm_ia64_ivt+0x7800 | 763 | .org kvm_ia64_ivt+0x7800 |
760 | //////////////////////////////////////////////////////////////////// | 764 | //////////////////////////////////////////////////////////////////// |
761 | // 0x7800 Entry 60 (size 16 bundles) Reserved | 765 | // 0x7800 Entry 60 (size 16 bundles) Reserved |
762 | KVM_FAULT(60) | 766 | KVM_FAULT(60) |
763 | 767 | ||
764 | .org kvm_ia64_ivt+0x7900 | 768 | .org kvm_ia64_ivt+0x7900 |
765 | ///////////////////////////////////////////////////////////////////// | 769 | ///////////////////////////////////////////////////////////////////// |
766 | // 0x7900 Entry 61 (size 16 bundles) Reserved | 770 | // 0x7900 Entry 61 (size 16 bundles) Reserved |
767 | KVM_FAULT(61) | 771 | KVM_FAULT(61) |
768 | 772 | ||
769 | .org kvm_ia64_ivt+0x7a00 | 773 | .org kvm_ia64_ivt+0x7a00 |
770 | ///////////////////////////////////////////////////////////////////// | 774 | ///////////////////////////////////////////////////////////////////// |
771 | // 0x7a00 Entry 62 (size 16 bundles) Reserved | 775 | // 0x7a00 Entry 62 (size 16 bundles) Reserved |
772 | KVM_FAULT(62) | 776 | KVM_FAULT(62) |
773 | 777 | ||
774 | .org kvm_ia64_ivt+0x7b00 | 778 | .org kvm_ia64_ivt+0x7b00 |
775 | ///////////////////////////////////////////////////////////////////// | 779 | ///////////////////////////////////////////////////////////////////// |
776 | // 0x7b00 Entry 63 (size 16 bundles) Reserved | 780 | // 0x7b00 Entry 63 (size 16 bundles) Reserved |
777 | KVM_FAULT(63) | 781 | KVM_FAULT(63) |
778 | 782 | ||
779 | .org kvm_ia64_ivt+0x7c00 | 783 | .org kvm_ia64_ivt+0x7c00 |
780 | //////////////////////////////////////////////////////////////////// | 784 | //////////////////////////////////////////////////////////////////// |
781 | // 0x7c00 Entry 64 (size 16 bundles) Reserved | 785 | // 0x7c00 Entry 64 (size 16 bundles) Reserved |
782 | KVM_FAULT(64) | 786 | KVM_FAULT(64) |
783 | 787 | ||
784 | .org kvm_ia64_ivt+0x7d00 | 788 | .org kvm_ia64_ivt+0x7d00 |
785 | ///////////////////////////////////////////////////////////////////// | 789 | ///////////////////////////////////////////////////////////////////// |
786 | // 0x7d00 Entry 65 (size 16 bundles) Reserved | 790 | // 0x7d00 Entry 65 (size 16 bundles) Reserved |
787 | KVM_FAULT(65) | 791 | KVM_FAULT(65) |
788 | 792 | ||
789 | .org kvm_ia64_ivt+0x7e00 | 793 | .org kvm_ia64_ivt+0x7e00 |
790 | ///////////////////////////////////////////////////////////////////// | 794 | ///////////////////////////////////////////////////////////////////// |
791 | // 0x7e00 Entry 66 (size 16 bundles) Reserved | 795 | // 0x7e00 Entry 66 (size 16 bundles) Reserved |
792 | KVM_FAULT(66) | 796 | KVM_FAULT(66) |
793 | 797 | ||
794 | .org kvm_ia64_ivt+0x7f00 | 798 | .org kvm_ia64_ivt+0x7f00 |
795 | //////////////////////////////////////////////////////////////////// | 799 | //////////////////////////////////////////////////////////////////// |
796 | // 0x7f00 Entry 67 (size 16 bundles) Reserved | 800 | // 0x7f00 Entry 67 (size 16 bundles) Reserved |
797 | KVM_FAULT(67) | 801 | KVM_FAULT(67) |
798 | 802 | ||
799 | .org kvm_ia64_ivt+0x8000 | 803 | .org kvm_ia64_ivt+0x8000 |
800 | // There is no particular reason for this code to be here, other than that | 804 | // There is no particular reason for this code to be here, other than that |
@@ -804,132 +808,128 @@ END(kvm_ia32_intercept)
804 | 808 | ||
805 | 809 | ||
806 | ENTRY(kvm_dtlb_miss_dispatch) | 810 | ENTRY(kvm_dtlb_miss_dispatch) |
807 | mov r19 = 2 | 811 | mov r19 = 2 |
808 | KVM_SAVE_MIN_WITH_COVER_R19 | 812 | KVM_SAVE_MIN_WITH_COVER_R19 |
809 | alloc r14=ar.pfs,0,0,3,0 | 813 | alloc r14=ar.pfs,0,0,3,0 |
810 | mov out0=cr.ifa | 814 | mov out0=cr.ifa |
811 | mov out1=r15 | 815 | mov out1=r15 |
812 | adds r3=8,r2 // set up second base pointer | 816 | adds r3=8,r2 // set up second base pointer |
813 | ;; | 817 | ;; |
814 | ssm psr.ic | 818 | ssm psr.ic |
815 | ;; | 819 | ;; |
816 | srlz.i // guarantee that interruption collection is on | 820 | srlz.i // guarantee that interruption collection is on |
817 | ;; | 821 | ;; |
818 | //(p15) ssm psr.i // restore psr.i | 822 | //(p15) ssm psr.i // restore psr.i |
819 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | 823 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp |
820 | ;; | 824 | ;; |
821 | KVM_SAVE_REST | 825 | KVM_SAVE_REST |
822 | KVM_SAVE_EXTRA | 826 | KVM_SAVE_EXTRA |
823 | mov rp=r14 | 827 | mov rp=r14 |
824 | ;; | 828 | ;; |
825 | adds out2=16,r12 | 829 | adds out2=16,r12 |
826 | br.call.sptk.many b6=kvm_page_fault | 830 | br.call.sptk.many b6=kvm_page_fault |
827 | END(kvm_dtlb_miss_dispatch) | 831 | END(kvm_dtlb_miss_dispatch) |
828 | 832 | ||
829 | ENTRY(kvm_itlb_miss_dispatch) | 833 | ENTRY(kvm_itlb_miss_dispatch) |
830 | 834 | ||
831 | KVM_SAVE_MIN_WITH_COVER_R19 | 835 | KVM_SAVE_MIN_WITH_COVER_R19 |
832 | alloc r14=ar.pfs,0,0,3,0 | 836 | alloc r14=ar.pfs,0,0,3,0 |
833 | mov out0=cr.ifa | 837 | mov out0=cr.ifa |
834 | mov out1=r15 | 838 | mov out1=r15 |
835 | adds r3=8,r2 // set up second base pointer | 839 | adds r3=8,r2 // set up second base pointer |
836 | ;; | 840 | ;; |
837 | ssm psr.ic | 841 | ssm psr.ic |
838 | ;; | 842 | ;; |
839 | srlz.i // guarantee that interruption collection is on | 843 | srlz.i // guarantee that interruption collection is on |
840 | ;; | 844 | ;; |
841 | //(p15) ssm psr.i // restore psr.i | 845 | //(p15) ssm psr.i // restore psr.i |
842 | addl r14=@gprel(ia64_leave_hypervisor),gp | 846 | addl r14=@gprel(ia64_leave_hypervisor),gp |
843 | ;; | 847 | ;; |
844 | KVM_SAVE_REST | 848 | KVM_SAVE_REST |
845 | mov rp=r14 | 849 | mov rp=r14 |
846 | ;; | 850 | ;; |
847 | adds out2=16,r12 | 851 | adds out2=16,r12 |
848 | br.call.sptk.many b6=kvm_page_fault | 852 | br.call.sptk.many b6=kvm_page_fault |
849 | END(kvm_itlb_miss_dispatch) | 853 | END(kvm_itlb_miss_dispatch) |
850 | 854 | ||
851 | ENTRY(kvm_dispatch_reflection) | 855 | ENTRY(kvm_dispatch_reflection) |
852 | /* | 856 | /* |
853 | * Input: | 857 | * Input: |
854 | * psr.ic: off | 858 | * psr.ic: off |
855 | * r19: intr type (offset into ivt, see ia64_int.h) | 859 | * r19: intr type (offset into ivt, see ia64_int.h) |
856 | * r31: contains saved predicates (pr) | 860 | * r31: contains saved predicates (pr) |
857 | */ | 861 | */ |
858 | KVM_SAVE_MIN_WITH_COVER_R19 | 862 | KVM_SAVE_MIN_WITH_COVER_R19 |
859 | alloc r14=ar.pfs,0,0,5,0 | 863 | alloc r14=ar.pfs,0,0,5,0 |
860 | mov out0=cr.ifa | 864 | mov out0=cr.ifa |
861 | mov out1=cr.isr | 865 | mov out1=cr.isr |
862 | mov out2=cr.iim | 866 | mov out2=cr.iim |
863 | mov out3=r15 | 867 | mov out3=r15 |
864 | adds r3=8,r2 // set up second base pointer | 868 | adds r3=8,r2 // set up second base pointer |
865 | ;; | 869 | ;; |
866 | ssm psr.ic | 870 | ssm psr.ic |
867 | ;; | 871 | ;; |
868 | srlz.i // guarantee that interruption collection is on | 872 | srlz.i // guarantee that interruption collection is on |
869 | ;; | 873 | ;; |
870 | //(p15) ssm psr.i // restore psr.i | 874 | //(p15) ssm psr.i // restore psr.i |
871 | addl r14=@gprel(ia64_leave_hypervisor),gp | 875 | addl r14=@gprel(ia64_leave_hypervisor),gp |
872 | ;; | 876 | ;; |
873 | KVM_SAVE_REST | 877 | KVM_SAVE_REST |
874 | mov rp=r14 | 878 | mov rp=r14 |
875 | ;; | 879 | ;; |
876 | adds out4=16,r12 | 880 | adds out4=16,r12 |
877 | br.call.sptk.many b6=reflect_interruption | 881 | br.call.sptk.many b6=reflect_interruption |
878 | END(kvm_dispatch_reflection) | 882 | END(kvm_dispatch_reflection) |
879 | 883 | ||
880 | ENTRY(kvm_dispatch_virtualization_fault) | 884 | ENTRY(kvm_dispatch_virtualization_fault) |
881 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | 885 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 |
882 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | 886 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 |
883 | ;; | 887 | ;; |
884 | st8 [r16] = r24 | 888 | st8 [r16] = r24 |
885 | st8 [r17] = r25 | 889 | st8 [r17] = r25 |
886 | ;; | 890 | ;; |
887 | KVM_SAVE_MIN_WITH_COVER_R19 | 891 | KVM_SAVE_MIN_WITH_COVER_R19 |
888 | ;; | 892 | ;; |
889 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | 893 | alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!) |
890 | mov out0=r13 //vcpu | 894 | mov out0=r13 //vcpu |
891 | adds r3=8,r2 // set up second base pointer | 895 | adds r3=8,r2 // set up second base pointer |
892 | ;; | 896 | ;; |
893 | ssm psr.ic | 897 | ssm psr.ic |
894 | ;; | 898 | ;; |
895 | srlz.i // guarantee that interruption collection is on | 899 | srlz.i // guarantee that interruption collection is on |
896 | ;; | 900 | ;; |
897 | //(p15) ssm psr.i // restore psr.i | 901 | //(p15) ssm psr.i // restore psr.i |
898 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | 902 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp |
899 | ;; | 903 | ;; |
900 | KVM_SAVE_REST | 904 | KVM_SAVE_REST |
901 | KVM_SAVE_EXTRA | 905 | KVM_SAVE_EXTRA |
902 | mov rp=r14 | 906 | mov rp=r14 |
903 | ;; | 907 | ;; |
904 | adds out1=16,sp //regs | 908 | adds out1=16,sp //regs |
905 | br.call.sptk.many b6=kvm_emulate | 909 | br.call.sptk.many b6=kvm_emulate |
906 | END(kvm_dispatch_virtualization_fault) | 910 | END(kvm_dispatch_virtualization_fault) |
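Before the full state save, the two st8 instructions at the top of this dispatcher stash the virtualization-fault cause (r24) and the faulting opcode (r25) into the vcpu structure at VMM_VCPU_CAUSE_OFFSET and VMM_VCPU_OPCODE_OFFSET, where kvm_emulate can read them from C. A rough C analogue, with field and function names that are assumptions rather than the actual struct layout:

	/* Illustrative sketch only: the real fields are wherever
	 * VMM_VCPU_CAUSE_OFFSET / VMM_VCPU_OPCODE_OFFSET point. */
	struct vcpu_fault_info {
		unsigned long cause;	/* filled from r24 (st8 [r16]=r24) */
		unsigned long opcode;	/* filled from r25 (st8 [r17]=r25) */
	};

	static inline void record_virtualization_fault(struct vcpu_fault_info *f,
						       unsigned long cause,
						       unsigned long opcode)
	{
		f->cause = cause;
		f->opcode = opcode;
	}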
907 | 911 | ||
908 | 912 | ||
909 | ENTRY(kvm_dispatch_interrupt) | 913 | ENTRY(kvm_dispatch_interrupt) |
910 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 | 914 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 |
911 | ;; | 915 | ;; |
912 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | 916 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
913 | //mov out0=cr.ivr // pass cr.ivr as first arg | 917 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
914 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 918 | ;; |
915 | ;; | 919 | ssm psr.ic |
916 | ssm psr.ic | 920 | ;; |
917 | ;; | 921 | srlz.i |
918 | srlz.i | 922 | ;; |
919 | ;; | 923 | //(p15) ssm psr.i |
920 | //(p15) ssm psr.i | 924 | addl r14=@gprel(ia64_leave_hypervisor),gp |
921 | addl r14=@gprel(ia64_leave_hypervisor),gp | 925 | ;; |
922 | ;; | 926 | KVM_SAVE_REST |
923 | KVM_SAVE_REST | 927 | mov rp=r14 |
924 | mov rp=r14 | 928 | ;; |
925 | ;; | 929 | mov out0=r13 // pass the vcpu pointer (current, in r13) as the argument |
926 | mov out0=r13 // pass the vcpu pointer (current, in r13) as the argument | 930 | br.call.sptk.many b6=kvm_ia64_handle_irq |
927 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
928 | END(kvm_dispatch_interrupt) | 931 | END(kvm_dispatch_interrupt) |
929 | 932 | ||
930 | |||
931 | |||
932 | |||
933 | GLOBAL_ENTRY(ia64_leave_nested) | 933 | GLOBAL_ENTRY(ia64_leave_nested) |
934 | rsm psr.i | 934 | rsm psr.i |
935 | ;; | 935 | ;; |
@@ -1008,7 +1008,7 @@ GLOBAL_ENTRY(ia64_leave_nested) | |||
1008 | ;; | 1008 | ;; |
1009 | ldf.fill f11=[r2] | 1009 | ldf.fill f11=[r2] |
1010 | // mov r18=r13 | 1010 | // mov r18=r13 |
1011 | // mov r21=r13 | 1011 | // mov r21=r13 |
1012 | adds r16=PT(CR_IPSR)+16,r12 | 1012 | adds r16=PT(CR_IPSR)+16,r12 |
1013 | adds r17=PT(CR_IIP)+16,r12 | 1013 | adds r17=PT(CR_IIP)+16,r12 |
1014 | ;; | 1014 | ;; |
@@ -1058,138 +1058,135 @@ GLOBAL_ENTRY(ia64_leave_nested) | |||
1058 | rfi | 1058 | rfi |
1059 | END(ia64_leave_nested) | 1059 | END(ia64_leave_nested) |
1060 | 1060 | ||
1061 | |||
1062 | |||
1063 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) | 1061 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) |
1064 | /* | 1062 | /* |
1065 | * work.need_resched etc. mustn't get changed | 1063 | * work.need_resched etc. mustn't get changed |
1066 | *by this CPU before it returns to | 1064 | *by this CPU before it returns to |
1067 | ;; | 1065 | * user- or fsys-mode, hence we disable interrupts early on: |
1068 | * user- or fsys-mode, hence we disable interrupts early on: | 1066 | */ |
1069 | */ | 1067 | adds r2 = PT(R4)+16,r12 |
1070 | adds r2 = PT(R4)+16,r12 | 1068 | adds r3 = PT(R5)+16,r12 |
1071 | adds r3 = PT(R5)+16,r12 | 1069 | adds r8 = PT(EML_UNAT)+16,r12 |
1072 | adds r8 = PT(EML_UNAT)+16,r12 | 1070 | ;; |
1073 | ;; | 1071 | ld8 r8 = [r8] |
1074 | ld8 r8 = [r8] | 1072 | ;; |
1075 | ;; | 1073 | mov ar.unat=r8 |
1076 | mov ar.unat=r8 | 1074 | ;; |
1077 | ;; | 1075 | ld8.fill r4=[r2],16 //load r4 |
1078 | ld8.fill r4=[r2],16 //load r4 | 1076 | ld8.fill r5=[r3],16 //load r5 |
1079 | ld8.fill r5=[r3],16 //load r5 | 1077 | ;; |
1080 | ;; | 1078 | ld8.fill r6=[r2] //load r6 |
1081 | ld8.fill r6=[r2] //load r6 | 1079 | ld8.fill r7=[r3] //load r7 |
1082 | ld8.fill r7=[r3] //load r7 | 1080 | ;; |
1083 | ;; | ||
1084 | END(ia64_leave_hypervisor_prepare) | 1081 | END(ia64_leave_hypervisor_prepare) |
1085 | //fall through | 1082 | //fall through |
1086 | GLOBAL_ENTRY(ia64_leave_hypervisor) | 1083 | GLOBAL_ENTRY(ia64_leave_hypervisor) |
1087 | rsm psr.i | 1084 | rsm psr.i |
1088 | ;; | 1085 | ;; |
1089 | br.call.sptk.many b0=leave_hypervisor_tail | 1086 | br.call.sptk.many b0=leave_hypervisor_tail |
1090 | ;; | 1087 | ;; |
1091 | adds r20=PT(PR)+16,r12 | 1088 | adds r20=PT(PR)+16,r12 |
1092 | adds r8=PT(EML_UNAT)+16,r12 | 1089 | adds r8=PT(EML_UNAT)+16,r12 |
1093 | ;; | 1090 | ;; |
1094 | ld8 r8=[r8] | 1091 | ld8 r8=[r8] |
1095 | ;; | 1092 | ;; |
1096 | mov ar.unat=r8 | 1093 | mov ar.unat=r8 |
1097 | ;; | 1094 | ;; |
1098 | lfetch [r20],PT(CR_IPSR)-PT(PR) | 1095 | lfetch [r20],PT(CR_IPSR)-PT(PR) |
1099 | adds r2 = PT(B6)+16,r12 | 1096 | adds r2 = PT(B6)+16,r12 |
1100 | adds r3 = PT(B7)+16,r12 | 1097 | adds r3 = PT(B7)+16,r12 |
1101 | ;; | 1098 | ;; |
1102 | lfetch [r20] | 1099 | lfetch [r20] |
1103 | ;; | 1100 | ;; |
1104 | ld8 r24=[r2],16 /* B6 */ | 1101 | ld8 r24=[r2],16 /* B6 */ |
1105 | ld8 r25=[r3],16 /* B7 */ | 1102 | ld8 r25=[r3],16 /* B7 */ |
1106 | ;; | 1103 | ;; |
1107 | ld8 r26=[r2],16 /* ar_csd */ | 1104 | ld8 r26=[r2],16 /* ar_csd */ |
1108 | ld8 r27=[r3],16 /* ar_ssd */ | 1105 | ld8 r27=[r3],16 /* ar_ssd */ |
1109 | mov b6 = r24 | 1106 | mov b6 = r24 |
1110 | ;; | 1107 | ;; |
1111 | ld8.fill r8=[r2],16 | 1108 | ld8.fill r8=[r2],16 |
1112 | ld8.fill r9=[r3],16 | 1109 | ld8.fill r9=[r3],16 |
1113 | mov b7 = r25 | 1110 | mov b7 = r25 |
1114 | ;; | 1111 | ;; |
1115 | mov ar.csd = r26 | 1112 | mov ar.csd = r26 |
1116 | mov ar.ssd = r27 | 1113 | mov ar.ssd = r27 |
1117 | ;; | 1114 | ;; |
1118 | ld8.fill r10=[r2],PT(R15)-PT(R10) | 1115 | ld8.fill r10=[r2],PT(R15)-PT(R10) |
1119 | ld8.fill r11=[r3],PT(R14)-PT(R11) | 1116 | ld8.fill r11=[r3],PT(R14)-PT(R11) |
1120 | ;; | 1117 | ;; |
1121 | ld8.fill r15=[r2],PT(R16)-PT(R15) | 1118 | ld8.fill r15=[r2],PT(R16)-PT(R15) |
1122 | ld8.fill r14=[r3],PT(R17)-PT(R14) | 1119 | ld8.fill r14=[r3],PT(R17)-PT(R14) |
1123 | ;; | 1120 | ;; |
1124 | ld8.fill r16=[r2],16 | 1121 | ld8.fill r16=[r2],16 |
1125 | ld8.fill r17=[r3],16 | 1122 | ld8.fill r17=[r3],16 |
1126 | ;; | 1123 | ;; |
1127 | ld8.fill r18=[r2],16 | 1124 | ld8.fill r18=[r2],16 |
1128 | ld8.fill r19=[r3],16 | 1125 | ld8.fill r19=[r3],16 |
1129 | ;; | 1126 | ;; |
1130 | ld8.fill r20=[r2],16 | 1127 | ld8.fill r20=[r2],16 |
1131 | ld8.fill r21=[r3],16 | 1128 | ld8.fill r21=[r3],16 |
1132 | ;; | 1129 | ;; |
1133 | ld8.fill r22=[r2],16 | 1130 | ld8.fill r22=[r2],16 |
1134 | ld8.fill r23=[r3],16 | 1131 | ld8.fill r23=[r3],16 |
1135 | ;; | 1132 | ;; |
1136 | ld8.fill r24=[r2],16 | 1133 | ld8.fill r24=[r2],16 |
1137 | ld8.fill r25=[r3],16 | 1134 | ld8.fill r25=[r3],16 |
1138 | ;; | 1135 | ;; |
1139 | ld8.fill r26=[r2],16 | 1136 | ld8.fill r26=[r2],16 |
1140 | ld8.fill r27=[r3],16 | 1137 | ld8.fill r27=[r3],16 |
1141 | ;; | 1138 | ;; |
1142 | ld8.fill r28=[r2],16 | 1139 | ld8.fill r28=[r2],16 |
1143 | ld8.fill r29=[r3],16 | 1140 | ld8.fill r29=[r3],16 |
1144 | ;; | 1141 | ;; |
1145 | ld8.fill r30=[r2],PT(F6)-PT(R30) | 1142 | ld8.fill r30=[r2],PT(F6)-PT(R30) |
1146 | ld8.fill r31=[r3],PT(F7)-PT(R31) | 1143 | ld8.fill r31=[r3],PT(F7)-PT(R31) |
1147 | ;; | 1144 | ;; |
1148 | rsm psr.i | psr.ic | 1145 | rsm psr.i | psr.ic |
1149 | // initiate turning off of interrupt and interruption collection | 1146 | // initiate turning off of interrupt and interruption collection |
1150 | invala // invalidate ALAT | 1147 | invala // invalidate ALAT |
1151 | ;; | 1148 | ;; |
1152 | srlz.i // ensure interruption collection is off | 1149 | srlz.i // ensure interruption collection is off |
1153 | ;; | 1150 | ;; |
1154 | bsw.0 | 1151 | bsw.0 |
1155 | ;; | 1152 | ;; |
1156 | adds r16 = PT(CR_IPSR)+16,r12 | 1153 | adds r16 = PT(CR_IPSR)+16,r12 |
1157 | adds r17 = PT(CR_IIP)+16,r12 | 1154 | adds r17 = PT(CR_IIP)+16,r12 |
1158 | mov r21=r13 // get current | 1155 | mov r21=r13 // get current |
1159 | ;; | 1156 | ;; |
1160 | ld8 r31=[r16],16 // load cr.ipsr | 1157 | ld8 r31=[r16],16 // load cr.ipsr |
1161 | ld8 r30=[r17],16 // load cr.iip | 1158 | ld8 r30=[r17],16 // load cr.iip |
1162 | ;; | 1159 | ;; |
1163 | ld8 r29=[r16],16 // load cr.ifs | 1160 | ld8 r29=[r16],16 // load cr.ifs |
1164 | ld8 r28=[r17],16 // load ar.unat | 1161 | ld8 r28=[r17],16 // load ar.unat |
1165 | ;; | 1162 | ;; |
1166 | ld8 r27=[r16],16 // load ar.pfs | 1163 | ld8 r27=[r16],16 // load ar.pfs |
1167 | ld8 r26=[r17],16 // load ar.rsc | 1164 | ld8 r26=[r17],16 // load ar.rsc |
1168 | ;; | 1165 | ;; |
1169 | ld8 r25=[r16],16 // load ar.rnat | 1166 | ld8 r25=[r16],16 // load ar.rnat |
1170 | ld8 r24=[r17],16 // load ar.bspstore | 1167 | ld8 r24=[r17],16 // load ar.bspstore |
1171 | ;; | 1168 | ;; |
1172 | ld8 r23=[r16],16 // load predicates | 1169 | ld8 r23=[r16],16 // load predicates |
1173 | ld8 r22=[r17],16 // load b0 | 1170 | ld8 r22=[r17],16 // load b0 |
1174 | ;; | 1171 | ;; |
1175 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" | 1172 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" |
1176 | ld8.fill r1=[r17],16 //load r1 | 1173 | ld8.fill r1=[r17],16 //load r1 |
1177 | ;; | 1174 | ;; |
1178 | ld8.fill r12=[r16],16 //load r12 | 1175 | ld8.fill r12=[r16],16 //load r12 |
1179 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 | 1176 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 |
1180 | ;; | 1177 | ;; |
1181 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr | 1178 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr |
1182 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 | 1179 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 |
1183 | ;; | 1180 | ;; |
1184 | ld8.fill r3=[r16] //load r3 | 1181 | ld8.fill r3=[r16] //load r3 |
1185 | ld8 r18=[r17] //load ar_ccv | 1182 | ld8 r18=[r17] //load ar_ccv |
1186 | ;; | 1183 | ;; |
1187 | mov ar.fpsr=r19 | 1184 | mov ar.fpsr=r19 |
1188 | mov ar.ccv=r18 | 1185 | mov ar.ccv=r18 |
1189 | shr.u r18=r20,16 | 1186 | shr.u r18=r20,16 |
1190 | ;; | 1187 | ;; |
1191 | kvm_rbs_switch: | 1188 | kvm_rbs_switch: |
1192 | mov r19=96 | 1189 | mov r19=96 |
1193 | 1190 | ||
1194 | kvm_dont_preserve_current_frame: | 1191 | kvm_dont_preserve_current_frame: |
1195 | /* | 1192 | /* |
@@ -1201,76 +1198,76 @@ kvm_dont_preserve_current_frame: | |||
1201 | # define pReturn p7 | 1198 | # define pReturn p7 |
1202 | # define Nregs 14 | 1199 | # define Nregs 14 |
1203 | 1200 | ||
1204 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | 1201 | alloc loc0=ar.pfs,2,Nregs-2,2,0 |
1205 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) | 1202 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) |
1206 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize | 1203 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize |
1207 | ;; | 1204 | ;; |
1208 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" | 1205 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" |
1209 | shladd in0=loc1,3,r19 | 1206 | shladd in0=loc1,3,r19 |
1210 | mov in1=0 | 1207 | mov in1=0 |
1211 | ;; | 1208 | ;; |
1212 | TEXT_ALIGN(32) | 1209 | TEXT_ALIGN(32) |
1213 | kvm_rse_clear_invalid: | 1210 | kvm_rse_clear_invalid: |
1214 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | 1211 | alloc loc0=ar.pfs,2,Nregs-2,2,0 |
1215 | cmp.lt pRecurse,p0=Nregs*8,in0 | 1212 | cmp.lt pRecurse,p0=Nregs*8,in0 |
1216 | // if more than Nregs regs left to clear, (re)curse | 1213 | // if more than Nregs regs left to clear, (re)curse |
1217 | add out0=-Nregs*8,in0 | 1214 | add out0=-Nregs*8,in0 |
1218 | add out1=1,in1 // increment recursion count | 1215 | add out1=1,in1 // increment recursion count |
1219 | mov loc1=0 | 1216 | mov loc1=0 |
1220 | mov loc2=0 | 1217 | mov loc2=0 |
1221 | ;; | 1218 | ;; |
1222 | mov loc3=0 | 1219 | mov loc3=0 |
1223 | mov loc4=0 | 1220 | mov loc4=0 |
1224 | mov loc5=0 | 1221 | mov loc5=0 |
1225 | mov loc6=0 | 1222 | mov loc6=0 |
1226 | mov loc7=0 | 1223 | mov loc7=0 |
1227 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid | 1224 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid |
1228 | ;; | 1225 | ;; |
1229 | mov loc8=0 | 1226 | mov loc8=0 |
1230 | mov loc9=0 | 1227 | mov loc9=0 |
1231 | cmp.ne pReturn,p0=r0,in1 | 1228 | cmp.ne pReturn,p0=r0,in1 |
1232 | // if recursion count != 0, we need to do a br.ret | 1229 | // if recursion count != 0, we need to do a br.ret |
1233 | mov loc10=0 | 1230 | mov loc10=0 |
1234 | mov loc11=0 | 1231 | mov loc11=0 |
1235 | (pReturn) br.ret.dptk.many b0 | 1232 | (pReturn) br.ret.dptk.many b0 |
1236 | 1233 | ||
1237 | # undef pRecurse | 1234 | # undef pRecurse |
1238 | # undef pReturn | 1235 | # undef pReturn |
1239 | 1236 | ||
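The kvm_rse_clear_invalid block above zeroes the remaining (invalid) stacked registers Nregs (14) at a time: while more than Nregs*8 bytes are left it re-calls itself via (pRecurse), and every level that recursed then unwinds through (pReturn) br.ret. A C-level analogue of that control flow, purely illustrative and not part of the kernel:

	enum { NREGS = 14 };

	/* Mirrors the pRecurse/pReturn logic: zero one frame of stacked
	 * registers per level, recurse while work remains, then unwind. */
	static void rse_clear_invalid(long bytes_left, long depth)
	{
		long frame[NREGS - 2] = { 0 };	/* stands in for loc0..loc11 */
		(void)frame;
		if (bytes_left > NREGS * 8)	/* (pRecurse) br.call ... */
			rse_clear_invalid(bytes_left - NREGS * 8, depth + 1);
		/* (pReturn) br.ret: levels with depth != 0 simply return */
	}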
1240 | // loadrs has already been shifted | 1237 | // loadrs has already been shifted |
1241 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame | 1238 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame |
1242 | ;; | 1239 | ;; |
1243 | loadrs | 1240 | loadrs |
1244 | ;; | 1241 | ;; |
1245 | mov ar.bspstore=r24 | 1242 | mov ar.bspstore=r24 |
1246 | ;; | 1243 | ;; |
1247 | mov ar.unat=r28 | 1244 | mov ar.unat=r28 |
1248 | mov ar.rnat=r25 | 1245 | mov ar.rnat=r25 |
1249 | mov ar.rsc=r26 | 1246 | mov ar.rsc=r26 |
1250 | ;; | 1247 | ;; |
1251 | mov cr.ipsr=r31 | 1248 | mov cr.ipsr=r31 |
1252 | mov cr.iip=r30 | 1249 | mov cr.iip=r30 |
1253 | mov cr.ifs=r29 | 1250 | mov cr.ifs=r29 |
1254 | mov ar.pfs=r27 | 1251 | mov ar.pfs=r27 |
1255 | adds r18=VMM_VPD_BASE_OFFSET,r21 | 1252 | adds r18=VMM_VPD_BASE_OFFSET,r21 |
1256 | ;; | 1253 | ;; |
1257 | ld8 r18=[r18] //vpd | 1254 | ld8 r18=[r18] //vpd |
1258 | adds r17=VMM_VCPU_ISR_OFFSET,r21 | 1255 | adds r17=VMM_VCPU_ISR_OFFSET,r21 |
1259 | ;; | 1256 | ;; |
1260 | ld8 r17=[r17] | 1257 | ld8 r17=[r17] |
1261 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | 1258 | adds r19=VMM_VPD_VPSR_OFFSET,r18 |
1262 | ;; | 1259 | ;; |
1263 | ld8 r19=[r19] //vpsr | 1260 | ld8 r19=[r19] //vpsr |
1264 | mov r25=r18 | 1261 | mov r25=r18 |
1265 | adds r16= VMM_VCPU_GP_OFFSET,r21 | 1262 | adds r16= VMM_VCPU_GP_OFFSET,r21 |
1266 | ;; | 1263 | ;; |
1267 | ld8 r16= [r16] // load gp (added into r24 below) | 1264 | ld8 r16= [r16] // load gp (added into r24 below) |
1268 | movl r24=@gprel(ia64_vmm_entry) // calculate return address | 1265 | movl r24=@gprel(ia64_vmm_entry) // calculate return address |
1269 | ;; | 1266 | ;; |
1270 | add r24=r24,r16 | 1267 | add r24=r24,r16 |
1271 | ;; | 1268 | ;; |
1272 | br.sptk.many kvm_vps_sync_write // call the service | 1269 | br.sptk.many kvm_vps_sync_write // call the service |
1273 | ;; | 1270 | ;; |
1274 | END(ia64_leave_hypervisor) | 1271 | END(ia64_leave_hypervisor) |
1275 | // fall through | 1272 | // fall through |
1276 | GLOBAL_ENTRY(ia64_vmm_entry) | 1273 | GLOBAL_ENTRY(ia64_vmm_entry) |
@@ -1283,16 +1280,14 @@ GLOBAL_ENTRY(ia64_vmm_entry) | |||
1283 | * r22:b0 | 1280 | * r22:b0 |
1284 | * r23:predicate | 1281 | * r23:predicate |
1285 | */ | 1282 | */ |
1286 | mov r24=r22 | 1283 | mov r24=r22 |
1287 | mov r25=r18 | 1284 | mov r25=r18 |
1288 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic | 1285 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic |
1289 | (p1) br.cond.sptk.few kvm_vps_resume_normal | 1286 | (p1) br.cond.sptk.few kvm_vps_resume_normal |
1290 | (p2) br.cond.sptk.many kvm_vps_resume_handler | 1287 | (p2) br.cond.sptk.many kvm_vps_resume_handler |
1291 | ;; | 1288 | ;; |
1292 | END(ia64_vmm_entry) | 1289 | END(ia64_vmm_entry) |
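ia64_vmm_entry picks the resume service purely from the guest's vpsr.ic bit: p1 (bit set) branches to kvm_vps_resume_normal, p2 (bit clear) to kvm_vps_resume_handler. A small C analogue of that selection, assuming PSR.ic is bit 13 as on IA-64; the callback names here are placeholders:

	#define IA64_PSR_IC_BIT	13

	/* Illustrative only: the tbit.nz / predicated-branch pair in C form. */
	static void vmm_entry_select(unsigned long vpsr,
				     void (*resume_normal)(void),
				     void (*resume_handler)(void))
	{
		if (vpsr & (1UL << IA64_PSR_IC_BIT))	/* p1 = vpsr.ic set   */
			resume_normal();		/* kvm_vps_resume_normal  */
		else					/* p2 = vpsr.ic clear */
			resume_handler();		/* kvm_vps_resume_handler */
	}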
1293 | 1290 | ||
1294 | |||
1295 | |||
1296 | /* | 1291 | /* |
1297 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, | 1292 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, |
1298 | * u64 arg3, u64 arg4, u64 arg5, | 1293 | * u64 arg3, u64 arg4, u64 arg5, |
@@ -1310,88 +1305,88 @@ psrsave = loc2 | |||
1310 | entry = loc3 | 1305 | entry = loc3 |
1311 | hostret = r24 | 1306 | hostret = r24 |
1312 | 1307 | ||
1313 | alloc pfssave=ar.pfs,4,4,0,0 | 1308 | alloc pfssave=ar.pfs,4,4,0,0 |
1314 | mov rpsave=rp | 1309 | mov rpsave=rp |
1315 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 | 1310 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 |
1316 | ;; | 1311 | ;; |
1317 | ld8 entry=[entry] | 1312 | ld8 entry=[entry] |
1318 | 1: mov hostret=ip | 1313 | 1: mov hostret=ip |
1319 | mov r25=in1 // copy arguments | 1314 | mov r25=in1 // copy arguments |
1320 | mov r26=in2 | 1315 | mov r26=in2 |
1321 | mov r27=in3 | 1316 | mov r27=in3 |
1322 | mov psrsave=psr | 1317 | mov psrsave=psr |
1323 | ;; | 1318 | ;; |
1324 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I | 1319 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I |
1325 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC | 1320 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC |
1326 | ;; | 1321 | ;; |
1327 | add hostret=2f-1b,hostret // calculate return address | 1322 | add hostret=2f-1b,hostret // calculate return address |
1328 | add entry=entry,in0 | 1323 | add entry=entry,in0 |
1329 | ;; | 1324 | ;; |
1330 | rsm psr.i | psr.ic | 1325 | rsm psr.i | psr.ic |
1331 | ;; | 1326 | ;; |
1332 | srlz.i | 1327 | srlz.i |
1333 | mov b6=entry | 1328 | mov b6=entry |
1334 | br.cond.sptk b6 // call the service | 1329 | br.cond.sptk b6 // call the service |
1335 | 2: | 1330 | 2: |
1336 | // Architectural sequence for enabling interrupts if necessary | 1331 | // Architectural sequence for enabling interrupts if necessary |
1337 | (p7) ssm psr.ic | 1332 | (p7) ssm psr.ic |
1338 | ;; | 1333 | ;; |
1339 | (p7) srlz.i | 1334 | (p7) srlz.i |
1340 | ;; | 1335 | ;; |
1341 | //(p6) ssm psr.i | 1336 | //(p6) ssm psr.i |
1342 | ;; | 1337 | ;; |
1343 | mov rp=rpsave | 1338 | mov rp=rpsave |
1344 | mov ar.pfs=pfssave | 1339 | mov ar.pfs=pfssave |
1345 | mov r8=r31 | 1340 | mov r8=r31 |
1346 | ;; | 1341 | ;; |
1347 | srlz.d | 1342 | srlz.d |
1348 | br.ret.sptk rp | 1343 | br.ret.sptk rp |
1349 | 1344 | ||
1350 | END(ia64_call_vsa) | 1345 | END(ia64_call_vsa) |
1351 | 1346 | ||
1352 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) | 1347 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) |
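Numerically, INIT_BSPSTORE works out to (4<<30) - (12<<20) - 0x100 = 0x100000000 - 0xC00000 - 0x100 = 0xFF3FFF00, i.e. 0x100 bytes below the point 12MB under the 4GB boundary.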
1353 | 1348 | ||
1354 | GLOBAL_ENTRY(vmm_reset_entry) | 1349 | GLOBAL_ENTRY(vmm_reset_entry) |
1355 | //set up ipsr, iip, vpd.vpsr, dcr | 1350 | //set up ipsr, iip, vpd.vpsr, dcr |
1356 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 | 1351 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 |
1357 | // For DCR: all bits 0 | 1352 | // For DCR: all bits 0 |
1358 | bsw.0 | 1353 | bsw.0 |
1359 | ;; | 1354 | ;; |
1360 | mov r21 =r13 | 1355 | mov r21 =r13 |
1361 | adds r14=-VMM_PT_REGS_SIZE, r12 | 1356 | adds r14=-VMM_PT_REGS_SIZE, r12 |
1362 | ;; | 1357 | ;; |
1363 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 | 1358 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 |
1364 | movl r10=0x8000000000000000 | 1359 | movl r10=0x8000000000000000 |
1365 | adds r16=PT(CR_IIP), r14 | 1360 | adds r16=PT(CR_IIP), r14 |
1366 | adds r20=PT(R1), r14 | 1361 | adds r20=PT(R1), r14 |
1367 | ;; | 1362 | ;; |
1368 | rsm psr.ic | psr.i | 1363 | rsm psr.ic | psr.i |
1369 | ;; | 1364 | ;; |
1370 | srlz.i | 1365 | srlz.i |
1371 | ;; | 1366 | ;; |
1372 | mov ar.rsc = 0 | 1367 | mov ar.rsc = 0 |
1373 | ;; | 1368 | ;; |
1374 | flushrs | 1369 | flushrs |
1375 | ;; | 1370 | ;; |
1376 | mov ar.bspstore = 0 | 1371 | mov ar.bspstore = 0 |
1377 | // clear BSPSTORE | 1372 | // clear BSPSTORE |
1378 | ;; | 1373 | ;; |
1379 | mov cr.ipsr=r6 | 1374 | mov cr.ipsr=r6 |
1380 | mov cr.ifs=r10 | 1375 | mov cr.ifs=r10 |
1381 | ld8 r4 = [r16] // Set init iip for first run. | 1376 | ld8 r4 = [r16] // Set init iip for first run. |
1382 | ld8 r1 = [r20] | 1377 | ld8 r1 = [r20] |
1383 | ;; | 1378 | ;; |
1384 | mov cr.iip=r4 | 1379 | mov cr.iip=r4 |
1385 | adds r16=VMM_VPD_BASE_OFFSET,r13 | 1380 | adds r16=VMM_VPD_BASE_OFFSET,r13 |
1386 | ;; | 1381 | ;; |
1387 | ld8 r18=[r16] | 1382 | ld8 r18=[r16] |
1388 | ;; | 1383 | ;; |
1389 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | 1384 | adds r19=VMM_VPD_VPSR_OFFSET,r18 |
1390 | ;; | 1385 | ;; |
1391 | ld8 r19=[r19] | 1386 | ld8 r19=[r19] |
1392 | mov r17=r0 | 1387 | mov r17=r0 |
1393 | mov r22=r0 | 1388 | mov r22=r0 |
1394 | mov r23=r0 | 1389 | mov r23=r0 |
1395 | br.cond.sptk ia64_vmm_entry | 1390 | br.cond.sptk ia64_vmm_entry |
1396 | br.ret.sptk b0 | 1391 | br.ret.sptk b0 |
1397 | END(vmm_reset_entry) | 1392 | END(vmm_reset_entry) |