author		Xiantao Zhang <xiantao.zhang@intel.com>	2008-11-21 08:04:37 -0500
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:54:59 -0500
commit		8fe0736763a07fbea56213ea105a0c2ee098e6fc
tree		38919a6c2075b5e33f1dfb9d6265833366172ad9 /arch/ia64/kvm
parent		9f7d5bb5e2abf5316bb17eb3e7751dbafa09e5cf
KVM: ia64: Clean up vmm_ivt.S using tab to indent every line
Use tabs for indentation throughout vmm_ivt.S.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--	arch/ia64/kvm/vmm_ivt.S	| 1470
1 file changed, 729 insertions(+), 741 deletions(-)
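
The diff below is almost entirely whitespace churn, so the two patterns it applies are easier to see in isolation. A sketch only, with whitespace made visible ("." marks a space, "<TAB>" a tab; neither marker appears in the real file), using the KVM_FAULT macro from the diff:

    before (space-indented):
        #define KVM_FAULT(n)    \
        ....kvm_fault_##n:;     \
        ....mov r19=n;;         \

    after (tab-indented):
        #define KVM_FAULT(n)<TAB>\
        <TAB>kvm_fault_##n:;<TAB>\
        <TAB>mov r19=n;;<TAB>\

Predicated branches also gain a space after the predicate, e.g. "(p7)br.sptk.many kvm_dispatch_reflection" becomes "(p7) br.sptk.many kvm_dispatch_reflection".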
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 50b464628536..3ef1a017a318 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1,5 +1,5 @@
 /*
- * /ia64/kvm_ivt.S
+ * arch/ia64/kvm/vmm_ivt.S
  *
  * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
  *	Stephane Eranian <eranian@hpl.hp.com>
@@ -71,37 +71,37 @@
 #endif
 
 #define KVM_FAULT(n)	\
 	kvm_fault_##n:;	\
 	mov r19=n;;	\
 	br.sptk.many kvm_vmm_panic;	\
 	;;	\
 
 #define KVM_REFLECT(n)	\
 	mov r31=pr;	\
 	mov r19=n;	/* prepare to save predicates */ \
 	mov r29=cr.ipsr;	\
 	;;	\
 	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;	\
-(p7)br.sptk.many kvm_dispatch_reflection;	\
+(p7) br.sptk.many kvm_dispatch_reflection;	\
 	br.sptk.many kvm_vmm_panic;	\
 
 GLOBAL_ENTRY(kvm_vmm_panic)
 	KVM_SAVE_MIN_WITH_COVER_R19
 	alloc r14=ar.pfs,0,0,1,0
 	mov out0=r15
 	adds r3=8,r2	// set up second base pointer
 	;;
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15) ssm psr.i	// restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
 	mov rp=r14
 	;;
 	br.call.sptk.many b6=vmm_panic_handler;
 END(kvm_vmm_panic)
 
 	.section .text.ivt,"ax"
@@ -112,308 +112,307 @@ kvm_ia64_ivt:
 ///////////////////////////////////////////////////////////////
 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
 ENTRY(kvm_vhpt_miss)
 	KVM_FAULT(0)
 END(kvm_vhpt_miss)
 
-
 	.org kvm_ia64_ivt+0x400
 ////////////////////////////////////////////////////////////////
 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
 ENTRY(kvm_itlb_miss)
 	mov r31 = pr
 	mov r29=cr.ipsr;
 	;;
 	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-	(p6) br.sptk kvm_alt_itlb_miss
+(p6) br.sptk kvm_alt_itlb_miss
 	mov r19 = 1
 	br.sptk kvm_itlb_miss_dispatch
 	KVM_FAULT(1);
 END(kvm_itlb_miss)
 
 	.org kvm_ia64_ivt+0x0800
 //////////////////////////////////////////////////////////////////
 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
 ENTRY(kvm_dtlb_miss)
 	mov r31 = pr
 	mov r29=cr.ipsr;
 	;;
 	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk kvm_alt_dtlb_miss
+(p6) br.sptk kvm_alt_dtlb_miss
 	br.sptk kvm_dtlb_miss_dispatch
 END(kvm_dtlb_miss)
 
 	.org kvm_ia64_ivt+0x0c00
 ////////////////////////////////////////////////////////////////////
 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
 ENTRY(kvm_alt_itlb_miss)
 	mov r16=cr.ifa	// get address that caused the TLB miss
 	;;
 	movl r17=PAGE_KERNEL
 	mov r24=cr.ipsr
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	;;
 	and r19=r19,r16	// clear ed, reserved bits, and PTE control bits
 	;;
 	or r19=r17,r19	// insert PTE control bits into r19
 	;;
 	movl r20=IA64_GRANULE_SHIFT<<2
 	;;
 	mov cr.itir=r20
 	;;
 	itc.i r19	// insert the TLB entry
 	mov pr=r31,-1
 	rfi
 END(kvm_alt_itlb_miss)
 
 	.org kvm_ia64_ivt+0x1000
 /////////////////////////////////////////////////////////////////////
 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
 ENTRY(kvm_alt_dtlb_miss)
 	mov r16=cr.ifa	// get address that caused the TLB miss
 	;;
 	movl r17=PAGE_KERNEL
 	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
 	mov r24=cr.ipsr
 	;;
 	and r19=r19,r16	// clear ed, reserved bits, and PTE control bits
 	;;
 	or r19=r19,r17	// insert PTE control bits into r19
 	;;
 	movl r20=IA64_GRANULE_SHIFT<<2
 	;;
 	mov cr.itir=r20
 	;;
 	itc.d r19	// insert the TLB entry
 	mov pr=r31,-1
 	rfi
 END(kvm_alt_dtlb_miss)
 
 	.org kvm_ia64_ivt+0x1400
 //////////////////////////////////////////////////////////////////////
 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
 ENTRY(kvm_nested_dtlb_miss)
 	KVM_FAULT(5)
 END(kvm_nested_dtlb_miss)
 
 	.org kvm_ia64_ivt+0x1800
 /////////////////////////////////////////////////////////////////////
 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
 ENTRY(kvm_ikey_miss)
 	KVM_REFLECT(6)
 END(kvm_ikey_miss)
 
 	.org kvm_ia64_ivt+0x1c00
 /////////////////////////////////////////////////////////////////////
 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 ENTRY(kvm_dkey_miss)
 	KVM_REFLECT(7)
 END(kvm_dkey_miss)
 
 	.org kvm_ia64_ivt+0x2000
 ////////////////////////////////////////////////////////////////////
 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
 ENTRY(kvm_dirty_bit)
 	KVM_REFLECT(8)
 END(kvm_dirty_bit)
 
 	.org kvm_ia64_ivt+0x2400
 ////////////////////////////////////////////////////////////////////
 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
 ENTRY(kvm_iaccess_bit)
 	KVM_REFLECT(9)
 END(kvm_iaccess_bit)
 
 	.org kvm_ia64_ivt+0x2800
 ///////////////////////////////////////////////////////////////////
 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
 ENTRY(kvm_daccess_bit)
 	KVM_REFLECT(10)
 END(kvm_daccess_bit)
 
 	.org kvm_ia64_ivt+0x2c00
 /////////////////////////////////////////////////////////////////
 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
 ENTRY(kvm_break_fault)
 	mov r31=pr
 	mov r19=11
 	mov r29=cr.ipsr
 	;;
 	KVM_SAVE_MIN_WITH_COVER_R19
 	;;
-	alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+	alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
 	mov out0=cr.ifa
 	mov out2=cr.isr	// FIXME: pity to make this slow access twice
 	mov out3=cr.iim	// FIXME: pity to make this slow access twice
 	adds r3=8,r2	// set up second base pointer
 	;;
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15)ssm psr.i	// restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
 	mov rp=r14
 	;;
 	adds out1=16,sp
 	br.call.sptk.many b6=kvm_ia64_handle_break
 	;;
 END(kvm_break_fault)
 
 	.org kvm_ia64_ivt+0x3000
 /////////////////////////////////////////////////////////////////
 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
 ENTRY(kvm_interrupt)
 	mov r31=pr	// prepare to save predicates
 	mov r19=12
 	mov r29=cr.ipsr
 	;;
 	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
 	tbit.z p0,p15=r29,IA64_PSR_I_BIT
 	;;
 (p7)	br.sptk kvm_dispatch_interrupt
 	;;
 	mov r27=ar.rsc	/* M */
 	mov r20=r1	/* A */
 	mov r25=ar.unat	/* M */
 	mov r26=ar.pfs	/* I */
 	mov r28=cr.iip	/* M */
 	cover	/* B (or nothing) */
 	;;
 	mov r1=sp
 	;;
 	invala	/* M */
 	mov r30=cr.ifs
 	;;
 	addl r1=-VMM_PT_REGS_SIZE,r1
 	;;
 	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
 	adds r16=PT(CR_IPSR),r1
 	;;
 	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
 	st8 [r16]=r29	/* save cr.ipsr */
 	;;
 	lfetch.fault.excl.nt1 [r17]
 	mov r29=b0
 	;;
 	adds r16=PT(R8),r1	/* initialize first base pointer */
 	adds r17=PT(R9),r1	/* initialize second base pointer */
 	mov r18=r0	/* make sure r18 isn't NaT */
 	;;
 .mem.offset 0,0; st8.spill [r16]=r8,16
 .mem.offset 8,0; st8.spill [r17]=r9,16
 	;;
 .mem.offset 0,0; st8.spill [r16]=r10,24
 .mem.offset 8,0; st8.spill [r17]=r11,24
 	;;
 	st8 [r16]=r28,16	/* save cr.iip */
 	st8 [r17]=r30,16	/* save cr.ifs */
 	mov r8=ar.fpsr	/* M */
 	mov r9=ar.csd
 	mov r10=ar.ssd
 	movl r11=FPSR_DEFAULT	/* L-unit */
 	;;
 	st8 [r16]=r25,16	/* save ar.unat */
 	st8 [r17]=r26,16	/* save ar.pfs */
 	shl r18=r18,16	/* compute ar.rsc to be used for "loadrs" */
 	;;
 	st8 [r16]=r27,16	/* save ar.rsc */
 	adds r17=16,r17	/* skip over ar_rnat field */
 	;;
 	st8 [r17]=r31,16	/* save predicates */
 	adds r16=16,r16	/* skip over ar_bspstore field */
 	;;
 	st8 [r16]=r29,16	/* save b0 */
 	st8 [r17]=r18,16	/* save ar.rsc value for "loadrs" */
 	;;
 .mem.offset 0,0; st8.spill [r16]=r20,16	/* save original r1 */
 .mem.offset 8,0; st8.spill [r17]=r12,16
 	adds r12=-16,r1
 	/* switch to kernel memory stack (with 16 bytes of scratch) */
 	;;
 .mem.offset 0,0; st8.spill [r16]=r13,16
 .mem.offset 8,0; st8.spill [r17]=r8,16	/* save ar.fpsr */
 	;;
 .mem.offset 0,0; st8.spill [r16]=r15,16
 .mem.offset 8,0; st8.spill [r17]=r14,16
 	dep r14=-1,r0,60,4
 	;;
 .mem.offset 0,0; st8.spill [r16]=r2,16
 .mem.offset 8,0; st8.spill [r17]=r3,16
 	adds r2=VMM_PT_REGS_R16_OFFSET,r1
 	adds r14 = VMM_VCPU_GP_OFFSET,r13
 	;;
 	mov r8=ar.ccv
 	ld8 r14 = [r14]
 	;;
 	mov r1=r14	/* establish kernel global pointer */
 	;; \
 	bsw.1
 	;;
 	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
 	mov out0=r13
 	;;
 	ssm psr.ic
 	;;
 	srlz.i
 	;;
 	//(p15) ssm psr.i
 	adds r3=8,r2	// set up second base pointer for SAVE_REST
 	srlz.i	// ensure everybody knows psr.ic is back on
 	;;
 .mem.offset 0,0; st8.spill [r2]=r16,16
 .mem.offset 8,0; st8.spill [r3]=r17,16
 	;;
 .mem.offset 0,0; st8.spill [r2]=r18,16
 .mem.offset 8,0; st8.spill [r3]=r19,16
 	;;
 .mem.offset 0,0; st8.spill [r2]=r20,16
 .mem.offset 8,0; st8.spill [r3]=r21,16
 	mov r18=b6
 	;;
 .mem.offset 0,0; st8.spill [r2]=r22,16
 .mem.offset 8,0; st8.spill [r3]=r23,16
 	mov r19=b7
 	;;
 .mem.offset 0,0; st8.spill [r2]=r24,16
 .mem.offset 8,0; st8.spill [r3]=r25,16
 	;;
 .mem.offset 0,0; st8.spill [r2]=r26,16
 .mem.offset 8,0; st8.spill [r3]=r27,16
 	;;
 .mem.offset 0,0; st8.spill [r2]=r28,16
 .mem.offset 8,0; st8.spill [r3]=r29,16
 	;;
 .mem.offset 0,0; st8.spill [r2]=r30,16
 .mem.offset 8,0; st8.spill [r3]=r31,32
 	;;
 	mov ar.fpsr=r11	/* M-unit */
 	st8 [r2]=r8,8	/* ar.ccv */
 	adds r24=PT(B6)-PT(F7),r3
 	;;
 	stf.spill [r2]=f6,32
 	stf.spill [r3]=f7,32
 	;;
 	stf.spill [r2]=f8,32
 	stf.spill [r3]=f9,32
 	;;
 	stf.spill [r2]=f10
 	stf.spill [r3]=f11
 	adds r25=PT(B7)-PT(F11),r3
 	;;
 	st8 [r24]=r18,16	/* b6 */
 	st8 [r25]=r19,16	/* b7 */
 	;;
 	st8 [r24]=r9	/* ar.csd */
 	st8 [r25]=r10	/* ar.ssd */
 	;;
 	srlz.d	// make sure we see the effect of cr.ivr
 	addl r14=@gprel(ia64_leave_nested),gp
 	;;
 	mov rp=r14
 	br.call.sptk.many b6=kvm_ia64_handle_irq
 	;;
 END(kvm_interrupt)
 
 	.global kvm_dispatch_vexirq
@@ -421,387 +420,385 @@ END(kvm_interrupt)
 //////////////////////////////////////////////////////////////////////
 // 0x3400 Entry 13 (size 64 bundles) Reserved
 ENTRY(kvm_virtual_exirq)
 	mov r31=pr
 	mov r19=13
 	mov r30 =r0
 	;;
 kvm_dispatch_vexirq:
 	cmp.eq p6,p0 = 1,r30
 	;;
-(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
+(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;
-(p6)ld8 r1 = [r29]
+(p6) ld8 r1 = [r29]
 	;;
 	KVM_SAVE_MIN_WITH_COVER_R19
 	alloc r14=ar.pfs,0,0,1,0
 	mov out0=r13
 
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15) ssm psr.i	// restore psr.i
 	adds r3=8,r2	// set up second base pointer
 	;;
 	KVM_SAVE_REST
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	mov rp=r14
 	br.call.sptk.many b6=kvm_vexirq
 END(kvm_virtual_exirq)
 
 	.org kvm_ia64_ivt+0x3800
 /////////////////////////////////////////////////////////////////////
 // 0x3800 Entry 14 (size 64 bundles) Reserved
 	KVM_FAULT(14)
 	// this code segment is from 2.6.16.13
-
 
 	.org kvm_ia64_ivt+0x3c00
 ///////////////////////////////////////////////////////////////////////
 // 0x3c00 Entry 15 (size 64 bundles) Reserved
 	KVM_FAULT(15)
-
 
 	.org kvm_ia64_ivt+0x4000
 ///////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
 	KVM_FAULT(16)
 
 	.org kvm_ia64_ivt+0x4400
 //////////////////////////////////////////////////////////////////////
 // 0x4400 Entry 17 (size 64 bundles) Reserved
 	KVM_FAULT(17)
 
 	.org kvm_ia64_ivt+0x4800
 //////////////////////////////////////////////////////////////////////
 // 0x4800 Entry 18 (size 64 bundles) Reserved
 	KVM_FAULT(18)
 
 	.org kvm_ia64_ivt+0x4c00
 //////////////////////////////////////////////////////////////////////
 // 0x4c00 Entry 19 (size 64 bundles) Reserved
 	KVM_FAULT(19)
 
 	.org kvm_ia64_ivt+0x5000
 //////////////////////////////////////////////////////////////////////
 // 0x5000 Entry 20 (size 16 bundles) Page Not Present
 ENTRY(kvm_page_not_present)
 	KVM_REFLECT(20)
 END(kvm_page_not_present)
 
 	.org kvm_ia64_ivt+0x5100
 ///////////////////////////////////////////////////////////////////////
 // 0x5100 Entry 21 (size 16 bundles) Key Permission vector
 ENTRY(kvm_key_permission)
 	KVM_REFLECT(21)
 END(kvm_key_permission)
 
 	.org kvm_ia64_ivt+0x5200
 //////////////////////////////////////////////////////////////////////
 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
 ENTRY(kvm_iaccess_rights)
 	KVM_REFLECT(22)
 END(kvm_iaccess_rights)
 
 	.org kvm_ia64_ivt+0x5300
 //////////////////////////////////////////////////////////////////////
 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
 ENTRY(kvm_daccess_rights)
 	KVM_REFLECT(23)
 END(kvm_daccess_rights)
 
 	.org kvm_ia64_ivt+0x5400
 /////////////////////////////////////////////////////////////////////
 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
 ENTRY(kvm_general_exception)
 	KVM_REFLECT(24)
 	KVM_FAULT(24)
 END(kvm_general_exception)
 
 	.org kvm_ia64_ivt+0x5500
 //////////////////////////////////////////////////////////////////////
 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
 ENTRY(kvm_disabled_fp_reg)
 	KVM_REFLECT(25)
 END(kvm_disabled_fp_reg)
 
 	.org kvm_ia64_ivt+0x5600
 ////////////////////////////////////////////////////////////////////
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(kvm_nat_consumption)
 	KVM_REFLECT(26)
 END(kvm_nat_consumption)
 
 	.org kvm_ia64_ivt+0x5700
 /////////////////////////////////////////////////////////////////////
 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
 ENTRY(kvm_speculation_vector)
 	KVM_REFLECT(27)
 END(kvm_speculation_vector)
 
 	.org kvm_ia64_ivt+0x5800
 /////////////////////////////////////////////////////////////////////
 // 0x5800 Entry 28 (size 16 bundles) Reserved
 	KVM_FAULT(28)
 
 	.org kvm_ia64_ivt+0x5900
 ///////////////////////////////////////////////////////////////////
 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
 ENTRY(kvm_debug_vector)
 	KVM_FAULT(29)
 END(kvm_debug_vector)
 
 	.org kvm_ia64_ivt+0x5a00
 ///////////////////////////////////////////////////////////////
 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
 ENTRY(kvm_unaligned_access)
 	KVM_REFLECT(30)
 END(kvm_unaligned_access)
 
 	.org kvm_ia64_ivt+0x5b00
 //////////////////////////////////////////////////////////////////////
 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
 ENTRY(kvm_unsupported_data_reference)
 	KVM_REFLECT(31)
 END(kvm_unsupported_data_reference)
 
 	.org kvm_ia64_ivt+0x5c00
 ////////////////////////////////////////////////////////////////////
 // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
 ENTRY(kvm_floating_point_fault)
 	KVM_REFLECT(32)
 END(kvm_floating_point_fault)
 
 	.org kvm_ia64_ivt+0x5d00
 /////////////////////////////////////////////////////////////////////
 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
 ENTRY(kvm_floating_point_trap)
 	KVM_REFLECT(33)
 END(kvm_floating_point_trap)
 
 	.org kvm_ia64_ivt+0x5e00
 //////////////////////////////////////////////////////////////////////
 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
 ENTRY(kvm_lower_privilege_trap)
 	KVM_REFLECT(34)
 END(kvm_lower_privilege_trap)
 
 	.org kvm_ia64_ivt+0x5f00
 //////////////////////////////////////////////////////////////////////
 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
 ENTRY(kvm_taken_branch_trap)
 	KVM_REFLECT(35)
 END(kvm_taken_branch_trap)
 
 	.org kvm_ia64_ivt+0x6000
 ////////////////////////////////////////////////////////////////////
 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
 ENTRY(kvm_single_step_trap)
 	KVM_REFLECT(36)
 END(kvm_single_step_trap)
 	.global kvm_virtualization_fault_back
 	.org kvm_ia64_ivt+0x6100
 /////////////////////////////////////////////////////////////////////
 // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
 ENTRY(kvm_virtualization_fault)
 	mov r31=pr
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;
 	st8 [r16] = r1
 	adds r17 = VMM_VCPU_GP_OFFSET, r21
 	;;
 	ld8 r1 = [r17]
 	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
 	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
 	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
 	cmp.eq p9,p0=EVENT_RSM,r24
 	cmp.eq p10,p0=EVENT_SSM,r24
 	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
 	cmp.eq p12,p0=EVENT_THASH,r24
-	(p6) br.dptk.many kvm_asm_mov_from_ar
-	(p7) br.dptk.many kvm_asm_mov_from_rr
-	(p8) br.dptk.many kvm_asm_mov_to_rr
-	(p9) br.dptk.many kvm_asm_rsm
-	(p10) br.dptk.many kvm_asm_ssm
-	(p11) br.dptk.many kvm_asm_mov_to_psr
-	(p12) br.dptk.many kvm_asm_thash
+(p6) br.dptk.many kvm_asm_mov_from_ar
+(p7) br.dptk.many kvm_asm_mov_from_rr
+(p8) br.dptk.many kvm_asm_mov_to_rr
+(p9) br.dptk.many kvm_asm_rsm
+(p10) br.dptk.many kvm_asm_ssm
+(p11) br.dptk.many kvm_asm_mov_to_psr
+(p12) br.dptk.many kvm_asm_thash
 	;;
 kvm_virtualization_fault_back:
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;
 	ld8 r1 = [r16]
 	;;
 	mov r19=37
 	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
 	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
 	;;
 	st8 [r16] = r24
 	st8 [r17] = r25
 	;;
 	cmp.ne p6,p0=EVENT_RFI, r24
-	(p6) br.sptk kvm_dispatch_virtualization_fault
+(p6) br.sptk kvm_dispatch_virtualization_fault
 	;;
 	adds r18=VMM_VPD_BASE_OFFSET,r21
 	;;
 	ld8 r18=[r18]
 	;;
 	adds r18=VMM_VPD_VIFS_OFFSET,r18
 	;;
 	ld8 r18=[r18]
 	;;
 	tbit.z p6,p0=r18,63
-	(p6) br.sptk kvm_dispatch_virtualization_fault
+(p6) br.sptk kvm_dispatch_virtualization_fault
 	;;
-	//if vifs.v=1 desert current register frame
+//if vifs.v=1 desert current register frame
 	alloc r18=ar.pfs,0,0,0,0
 	br.sptk kvm_dispatch_virtualization_fault
 END(kvm_virtualization_fault)
 
 	.org kvm_ia64_ivt+0x6200
 //////////////////////////////////////////////////////////////
 // 0x6200 Entry 38 (size 16 bundles) Reserved
 	KVM_FAULT(38)
 
 	.org kvm_ia64_ivt+0x6300
 /////////////////////////////////////////////////////////////////
 // 0x6300 Entry 39 (size 16 bundles) Reserved
 	KVM_FAULT(39)
 
 	.org kvm_ia64_ivt+0x6400
 /////////////////////////////////////////////////////////////////
 // 0x6400 Entry 40 (size 16 bundles) Reserved
 	KVM_FAULT(40)
 
 	.org kvm_ia64_ivt+0x6500
 //////////////////////////////////////////////////////////////////
 // 0x6500 Entry 41 (size 16 bundles) Reserved
 	KVM_FAULT(41)
 
 	.org kvm_ia64_ivt+0x6600
 //////////////////////////////////////////////////////////////////
 // 0x6600 Entry 42 (size 16 bundles) Reserved
 	KVM_FAULT(42)
 
 	.org kvm_ia64_ivt+0x6700
 //////////////////////////////////////////////////////////////////
 // 0x6700 Entry 43 (size 16 bundles) Reserved
 	KVM_FAULT(43)
 
 	.org kvm_ia64_ivt+0x6800
 //////////////////////////////////////////////////////////////////
 // 0x6800 Entry 44 (size 16 bundles) Reserved
 	KVM_FAULT(44)
 
 	.org kvm_ia64_ivt+0x6900
 ///////////////////////////////////////////////////////////////////
 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception
 //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
 ENTRY(kvm_ia32_exception)
 	KVM_FAULT(45)
 END(kvm_ia32_exception)
 
 	.org kvm_ia64_ivt+0x6a00
 ////////////////////////////////////////////////////////////////////
 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
 ENTRY(kvm_ia32_intercept)
 	KVM_FAULT(47)
 END(kvm_ia32_intercept)
 
 	.org kvm_ia64_ivt+0x6c00
 /////////////////////////////////////////////////////////////////////
 // 0x6c00 Entry 48 (size 16 bundles) Reserved
 	KVM_FAULT(48)
 
 	.org kvm_ia64_ivt+0x6d00
 //////////////////////////////////////////////////////////////////////
 // 0x6d00 Entry 49 (size 16 bundles) Reserved
 	KVM_FAULT(49)
 
 	.org kvm_ia64_ivt+0x6e00
 //////////////////////////////////////////////////////////////////////
 // 0x6e00 Entry 50 (size 16 bundles) Reserved
 	KVM_FAULT(50)
 
 	.org kvm_ia64_ivt+0x6f00
 /////////////////////////////////////////////////////////////////////
 // 0x6f00 Entry 51 (size 16 bundles) Reserved
 	KVM_FAULT(52)
 
 	.org kvm_ia64_ivt+0x7100
 ////////////////////////////////////////////////////////////////////
 // 0x7100 Entry 53 (size 16 bundles) Reserved
 	KVM_FAULT(53)
 
 	.org kvm_ia64_ivt+0x7200
 /////////////////////////////////////////////////////////////////////
 // 0x7200 Entry 54 (size 16 bundles) Reserved
 	KVM_FAULT(54)
 
 	.org kvm_ia64_ivt+0x7300
 ////////////////////////////////////////////////////////////////////
 // 0x7300 Entry 55 (size 16 bundles) Reserved
 	KVM_FAULT(55)
 
 	.org kvm_ia64_ivt+0x7400
 ////////////////////////////////////////////////////////////////////
 // 0x7400 Entry 56 (size 16 bundles) Reserved
 	KVM_FAULT(56)
 
 	.org kvm_ia64_ivt+0x7500
 /////////////////////////////////////////////////////////////////////
 // 0x7500 Entry 57 (size 16 bundles) Reserved
 	KVM_FAULT(57)
 
 	.org kvm_ia64_ivt+0x7600
 /////////////////////////////////////////////////////////////////////
 // 0x7600 Entry 58 (size 16 bundles) Reserved
 	KVM_FAULT(58)
 
 	.org kvm_ia64_ivt+0x7700
 ////////////////////////////////////////////////////////////////////
 // 0x7700 Entry 59 (size 16 bundles) Reserved
 	KVM_FAULT(59)
 
 	.org kvm_ia64_ivt+0x7800
 ////////////////////////////////////////////////////////////////////
 // 0x7800 Entry 60 (size 16 bundles) Reserved
 	KVM_FAULT(60)
 
 	.org kvm_ia64_ivt+0x7900
 /////////////////////////////////////////////////////////////////////
 // 0x7900 Entry 61 (size 16 bundles) Reserved
 	KVM_FAULT(61)
 
 	.org kvm_ia64_ivt+0x7a00
 /////////////////////////////////////////////////////////////////////
 // 0x7a00 Entry 62 (size 16 bundles) Reserved
 	KVM_FAULT(62)
 
 	.org kvm_ia64_ivt+0x7b00
 /////////////////////////////////////////////////////////////////////
 // 0x7b00 Entry 63 (size 16 bundles) Reserved
 	KVM_FAULT(63)
 
 	.org kvm_ia64_ivt+0x7c00
 ////////////////////////////////////////////////////////////////////
 // 0x7c00 Entry 64 (size 16 bundles) Reserved
 	KVM_FAULT(64)
 
 	.org kvm_ia64_ivt+0x7d00
 /////////////////////////////////////////////////////////////////////
 // 0x7d00 Entry 65 (size 16 bundles) Reserved
 	KVM_FAULT(65)
 
 	.org kvm_ia64_ivt+0x7e00
 /////////////////////////////////////////////////////////////////////
 // 0x7e00 Entry 66 (size 16 bundles) Reserved
 	KVM_FAULT(66)
 
 	.org kvm_ia64_ivt+0x7f00
 ////////////////////////////////////////////////////////////////////
 // 0x7f00 Entry 67 (size 16 bundles) Reserved
 	KVM_FAULT(67)
 
 	.org kvm_ia64_ivt+0x8000
 // There is no particular reason for this code to be here, other than that
@@ -811,132 +808,128 @@ END(kvm_ia32_intercept)
 
 
 ENTRY(kvm_dtlb_miss_dispatch)
 	mov r19 = 2
 	KVM_SAVE_MIN_WITH_COVER_R19
 	alloc r14=ar.pfs,0,0,3,0
 	mov out0=cr.ifa
 	mov out1=r15
 	adds r3=8,r2	// set up second base pointer
 	;;
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15) ssm psr.i	// restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
 	;;
 	KVM_SAVE_REST
 	KVM_SAVE_EXTRA
 	mov rp=r14
 	;;
 	adds out2=16,r12
 	br.call.sptk.many b6=kvm_page_fault
 END(kvm_dtlb_miss_dispatch)
 
 ENTRY(kvm_itlb_miss_dispatch)
 
 	KVM_SAVE_MIN_WITH_COVER_R19
 	alloc r14=ar.pfs,0,0,3,0
 	mov out0=cr.ifa
 	mov out1=r15
 	adds r3=8,r2	// set up second base pointer
 	;;
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15) ssm psr.i	// restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
 	mov rp=r14
 	;;
 	adds out2=16,r12
 	br.call.sptk.many b6=kvm_page_fault
 END(kvm_itlb_miss_dispatch)
 
 ENTRY(kvm_dispatch_reflection)
-	/*
+/*
  * Input:
  * psr.ic: off
  * r19: intr type (offset into ivt, see ia64_int.h)
 * r31: contains saved predicates (pr)
 */
 	KVM_SAVE_MIN_WITH_COVER_R19
 	alloc r14=ar.pfs,0,0,5,0
 	mov out0=cr.ifa
 	mov out1=cr.isr
 	mov out2=cr.iim
 	mov out3=r15
 	adds r3=8,r2	// set up second base pointer
 	;;
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15) ssm psr.i	// restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
 	mov rp=r14
 	;;
 	adds out4=16,r12
 	br.call.sptk.many b6=reflect_interruption
 END(kvm_dispatch_reflection)
 
 ENTRY(kvm_dispatch_virtualization_fault)
 	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
 	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
 	;;
 	st8 [r16] = r24
 	st8 [r17] = r25
 	;;
 	KVM_SAVE_MIN_WITH_COVER_R19
 	;;
-	alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+	alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
 	mov out0=r13	//vcpu
 	adds r3=8,r2	// set up second base pointer
 	;;
 	ssm psr.ic
 	;;
 	srlz.i	// guarantee that interruption collection is on
 	;;
 	//(p15) ssm psr.i	// restore psr.i
 	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
 	;;
 	KVM_SAVE_REST
 	KVM_SAVE_EXTRA
 	mov rp=r14
 	;;
 	adds out1=16,sp	//regs
 	br.call.sptk.many b6=kvm_emulate
 END(kvm_dispatch_virtualization_fault)
 
 
 ENTRY(kvm_dispatch_interrupt)
 	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
 	;;
 	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
-	//mov out0=cr.ivr	// pass cr.ivr as first arg
 	adds r3=8,r2	// set up second base pointer for SAVE_REST
 	;;
 	ssm psr.ic
 	;;
 	srlz.i
 	;;
 	//(p15) ssm psr.i
 	addl r14=@gprel(ia64_leave_hypervisor),gp
 	;;
 	KVM_SAVE_REST
 	mov rp=r14
 	;;
 	mov out0=r13	// pass pointer to pt_regs as second arg
 	br.call.sptk.many b6=kvm_ia64_handle_irq
 END(kvm_dispatch_interrupt)
 
-
-
-
 GLOBAL_ENTRY(ia64_leave_nested)
 	rsm psr.i
 	;;
@@ -1015,7 +1008,7 @@ GLOBAL_ENTRY(ia64_leave_nested)
 	;;
 	ldf.fill f11=[r2]
 // mov r18=r13
 // mov r21=r13
 	adds r16=PT(CR_IPSR)+16,r12
 	adds r17=PT(CR_IIP)+16,r12
 	;;
@@ -1065,138 +1058,135 @@ GLOBAL_ENTRY(ia64_leave_nested)
 	rfi
 END(ia64_leave_nested)
 
-
-
 GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
-	/*
+/*
  * work.need_resched etc. mustn't get changed
  *by this CPU before it returns to
-	;;
 * user- or fsys-mode, hence we disable interrupts early on:
 */
 	adds r2 = PT(R4)+16,r12
 	adds r3 = PT(R5)+16,r12
 	adds r8 = PT(EML_UNAT)+16,r12
 	;;
 	ld8 r8 = [r8]
 	;;
 	mov ar.unat=r8
 	;;
 	ld8.fill r4=[r2],16	//load r4
 	ld8.fill r5=[r3],16	//load r5
 	;;
 	ld8.fill r6=[r2]	//load r6
 	ld8.fill r7=[r3]	//load r7
 	;;
 END(ia64_leave_hypervisor_prepare)
 //fall through
 GLOBAL_ENTRY(ia64_leave_hypervisor)
 	rsm psr.i
 	;;
 	br.call.sptk.many b0=leave_hypervisor_tail
 	;;
 	adds r20=PT(PR)+16,r12
 	adds r8=PT(EML_UNAT)+16,r12
 	;;
 	ld8 r8=[r8]
 	;;
 	mov ar.unat=r8
 	;;
 	lfetch [r20],PT(CR_IPSR)-PT(PR)
 	adds r2 = PT(B6)+16,r12
 	adds r3 = PT(B7)+16,r12
 	;;
 	lfetch [r20]
 	;;
 	ld8 r24=[r2],16	/* B6 */
 	ld8 r25=[r3],16	/* B7 */
 	;;
 	ld8 r26=[r2],16	/* ar_csd */
 	ld8 r27=[r3],16	/* ar_ssd */
 	mov b6 = r24
 	;;
 	ld8.fill r8=[r2],16
 	ld8.fill r9=[r3],16
 	mov b7 = r25
 	;;
 	mov ar.csd = r26
 	mov ar.ssd = r27
 	;;
 	ld8.fill r10=[r2],PT(R15)-PT(R10)
 	ld8.fill r11=[r3],PT(R14)-PT(R11)
 	;;
 	ld8.fill r15=[r2],PT(R16)-PT(R15)
 	ld8.fill r14=[r3],PT(R17)-PT(R14)
 	;;
 	ld8.fill r16=[r2],16
 	ld8.fill r17=[r3],16
 	;;
 	ld8.fill r18=[r2],16
 	ld8.fill r19=[r3],16
 	;;
 	ld8.fill r20=[r2],16
 	ld8.fill r21=[r3],16
 	;;
 	ld8.fill r22=[r2],16
 	ld8.fill r23=[r3],16
 	;;
 	ld8.fill r24=[r2],16
 	ld8.fill r25=[r3],16
 	;;
 	ld8.fill r26=[r2],16
 	ld8.fill r27=[r3],16
 	;;
 	ld8.fill r28=[r2],16
 	ld8.fill r29=[r3],16
 	;;
 	ld8.fill r30=[r2],PT(F6)-PT(R30)
 	ld8.fill r31=[r3],PT(F7)-PT(R31)
 	;;
 	rsm psr.i | psr.ic
 	// initiate turning off of interrupt and interruption collection
 	invala	// invalidate ALAT
 	;;
 	srlz.i	// ensure interruption collection is off
 	;;
 	bsw.0
 	;;
 	adds r16 = PT(CR_IPSR)+16,r12
 	adds r17 = PT(CR_IIP)+16,r12
 	mov r21=r13	// get current
 	;;
 	ld8 r31=[r16],16	// load cr.ipsr
 	ld8 r30=[r17],16	// load cr.iip
 	;;
 	ld8 r29=[r16],16	// load cr.ifs
 	ld8 r28=[r17],16	// load ar.unat
 	;;
 	ld8 r27=[r16],16	// load ar.pfs
 	ld8 r26=[r17],16	// load ar.rsc
 	;;
 	ld8 r25=[r16],16	// load ar.rnat
 	ld8 r24=[r17],16	// load ar.bspstore
 	;;
 	ld8 r23=[r16],16	// load predicates
 	ld8 r22=[r17],16	// load b0
 	;;
 	ld8 r20=[r16],16	// load ar.rsc value for "loadrs"
 	ld8.fill r1=[r17],16	//load r1
 	;;
 	ld8.fill r12=[r16],16	//load r12
 	ld8.fill r13=[r17],PT(R2)-PT(R13)	//load r13
 	;;
 	ld8 r19=[r16],PT(R3)-PT(AR_FPSR)	//load ar_fpsr
 	ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)	//load r2
 	;;
 	ld8.fill r3=[r16]	//load r3
 	ld8 r18=[r17]	//load ar_ccv
 	;;
 	mov ar.fpsr=r19
 	mov ar.ccv=r18
 	shr.u r18=r20,16
 	;;
 kvm_rbs_switch:
 	mov r19=96
 
 kvm_dont_preserve_current_frame:
 /*
@@ -1208,76 +1198,76 @@ kvm_dont_preserve_current_frame:
# define pReturn p7
# define Nregs 14

	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
	sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r19
	mov in1=0
	;;
	TEXT_ALIGN(32)
kvm_rse_clear_invalid:
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1 // increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1
	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0

# undef pRecurse
# undef pReturn

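The recursion above zeroes the invalid portion of the register backing store: in0 arrives as the byte count still to clear (shladd in0=loc1,3,r19, i.e. eight bytes per RNaT slot plus the remainder named in the comments), and each activation nets one fresh Nregs-register frame whose zeroed locals the RSE spills as clean frames. A C model of the control flow, with assumed function and parameter names:

enum { NREGS = 14 };                       /* matches "# define Nregs 14" */

static void rse_clear_invalid_model(long bytes_left, long depth)
{
	/* asm: alloc + "mov locN=0" zeroes this level's stacked registers */
	if (bytes_left > NREGS * 8)        /* cmp.lt pRecurse,p0=Nregs*8,in0 */
		rse_clear_invalid_model(bytes_left - NREGS * 8, depth + 1);
	if (depth != 0)                    /* cmp.ne pReturn,p0=r0,in1 */
		return;                    /* (pReturn) br.ret unwinds a level */
	/* depth == 0: the outermost call falls through to the loadrs code */
}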
// loadrs has already been shifted
	alloc r16=ar.pfs,0,0,0,0 // drop current register frame
	;;
	loadrs
	;;
	mov ar.bspstore=r24
	;;
	mov ar.unat=r28
	mov ar.rnat=r25
	mov ar.rsc=r26
	;;
	mov cr.ipsr=r31
	mov cr.iip=r30
	mov cr.ifs=r29
	mov ar.pfs=r27
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18] //vpd
	adds r17=VMM_VCPU_ISR_OFFSET,r21
	;;
	ld8 r17=[r17]
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19] //vpsr
	mov r25=r18
	adds r16= VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r16= [r16] // load the vmm gp; r24 gets the entry address below
	movl r24=@gprel(ia64_vmm_entry) // calculate return address
	;;
	add r24=r24,r16
	;;
	br.sptk.many kvm_vps_sync_write // call the service
	;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
@@ -1290,16 +1280,14 @@ GLOBAL_ENTRY(ia64_vmm_entry)
 * r22:b0
 * r23:predicate
 */
	mov r24=r22
	mov r25=r18
	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
(p1) br.cond.sptk.few kvm_vps_resume_normal
(p2) br.cond.sptk.many kvm_vps_resume_handler
	;;
END(ia64_vmm_entry)
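ia64_vmm_entry picks the PAL VPS resume service based on the guest's vpsr.ic; since kvm_vps_sync_write was entered with r24 pre-loaded with this routine's address, the service effectively "returns" here before the resume. A C sketch of the dispatch: the two service names are the real branch targets above, the wrapper is assumed.

extern void kvm_vps_resume_normal(void);   /* guest psr.ic was 1 */
extern void kvm_vps_resume_handler(void);  /* guest psr.ic was 0 */

static void ia64_vmm_entry_model(unsigned long vpsr)
{
	if (vpsr & (1UL << 13))            /* tbit on IA64_PSR_IC_BIT (13) */
		kvm_vps_resume_normal();   /* interruption collection on */
	else
		kvm_vps_resume_handler();  /* guest was inside a handler */
}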

/*
 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
 * u64 arg3, u64 arg4, u64 arg5,
@@ -1317,88 +1305,88 @@ psrsave = loc2
entry = loc3
hostret = r24

	alloc pfssave=ar.pfs,4,4,0,0
	mov rpsave=rp
	adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
	;;
	ld8 entry=[entry]
1:	mov hostret=ip
	mov r25=in1 // copy arguments
	mov r26=in2
	mov r27=in3
	mov psrsave=psr
	;;
	tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
	tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
	;;
	add hostret=2f-1b,hostret // calculate return address
	add entry=entry,in0
	;;
	rsm psr.i | psr.ic
	;;
	srlz.i
	mov b6=entry
	br.cond.sptk b6 // call the service
2:
// Architectural sequence for enabling interrupts if necessary
(p7) ssm psr.ic
	;;
(p7) srlz.i
	;;
//(p6) ssm psr.i
	;;
	mov rp=rpsave
	mov ar.pfs=pfssave
	mov r8=r31
	;;
	srlz.d
	br.ret.sptk rp

END(ia64_call_vsa)
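The stub forwards at most four values: the alloc gives in0..in3, in0 is added to the per-vcpu VSA base to form the service entry point, in1..in3 travel in r25..r27, and the result comes back in r31 and is moved to r8, the C return register (the remaining parameters of the prototype quoted above are elided by the hunk). A C model of the indirection, with assumed names and types rather than the real API:

typedef unsigned long u64;

/* models the per-vcpu value at VMM_VCPU_VSA_BASE_OFFSET(r13);
 * must be set before use, zero here only to keep the model compilable */
static u64 vsa_base;

static u64 ia64_call_vsa_model(u64 proc, u64 arg1, u64 arg2, u64 arg3)
{
	u64 (*service)(u64, u64, u64) =
		(u64 (*)(u64, u64, u64))(vsa_base + proc); /* add entry=entry,in0 */
	/* psr.i and psr.ic are masked around the real call; the service
	 * jumps back to the host return address computed from "2f-1b" */
	return service(arg1, arg2, arg3);  /* result: r31 -> r8 */
}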

#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
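For reference, the macro works out to (4 << 30) - (12 << 20) - 0x100 = 0x100000000 - 0xc00000 - 0x100 = 0xff3fff00, i.e. 256 bytes below the 4 GiB - 12 MiB boundary; it is not referenced within the hunks shown here.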

GLOBAL_ENTRY(vmm_reset_entry)
	//set up ipsr, iip, vpd.vpsr, dcr
	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
	// For DCR: all bits 0
	bsw.0
	;;
	mov r21=r13
	adds r14=-VMM_PT_REGS_SIZE, r12
	;;
	movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
	movl r10=0x8000000000000000
	adds r16=PT(CR_IIP), r14
	adds r20=PT(R1), r14
	;;
	rsm psr.ic | psr.i
	;;
	srlz.i
	;;
	mov ar.rsc = 0
	;;
	flushrs
	;;
	mov ar.bspstore = 0
	// clear BSPSTORE
	;;
	mov cr.ipsr=r6
	mov cr.ifs=r10
	ld8 r4 = [r16] // Set init iip for first run.
	ld8 r1 = [r20]
	;;
	mov cr.iip=r4
	adds r16=VMM_VPD_BASE_OFFSET,r13
	;;
	ld8 r18=[r16]
	;;
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]
	mov r17=r0
	mov r22=r0
	mov r23=r0
	br.cond.sptk ia64_vmm_entry
	br.ret.sptk b0
END(vmm_reset_entry)
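As a sanity check, the IPSR constant above decomposes exactly into the bits named in the comment, using the architectural PSR bit positions (ic=13, i=14, dt=17, si=23, rt=27, it=36, bn=44, vm=46):

#include <stdio.h>

int main(void)
{
	unsigned long ipsr =
		(1UL << 13) |   /* psr.ic */
		(1UL << 14) |   /* psr.i  */
		(1UL << 17) |   /* psr.dt */
		(1UL << 23) |   /* psr.si */
		(1UL << 27) |   /* psr.rt */
		(1UL << 36) |   /* psr.it */
		(1UL << 44) |   /* psr.bn */
		(1UL << 46);    /* psr.vm */
	printf("%#lx\n", ipsr);   /* prints 0x501008826000 */
	return 0;
}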