Diffstat (limited to 'arch/ia64/kvm/optvfault.S')
-rw-r--r--	arch/ia64/kvm/optvfault.S	181
1 file changed, 160 insertions(+), 21 deletions(-)
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index e4f15d641b22..634abad979b5 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -1,9 +1,12 @@
 /*
- * arch/ia64/vmx/optvfault.S
+ * arch/ia64/kvm/optvfault.S
  * optimize virtualization fault handler
  *
  * Copyright (C) 2006 Intel Co
  *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Copyright (C) 2008 Intel Co
+ *	Add the support for Tukwila processors.
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -20,6 +23,98 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
+#define VMX_VPS_SYNC_READ			\
+	add r16=VMM_VPD_BASE_OFFSET,r21;	\
+	mov r17 = b0;				\
+	mov r18 = r24;				\
+	mov r19 = r25;				\
+	mov r20 = r31;				\
+	;;					\
+{.mii;						\
+	ld8 r16 = [r16];			\
+	nop 0x0;				\
+	mov r24 = ip;				\
+	;;					\
+};						\
+{.mmb;						\
+	add r24=0x20, r24;			\
+	mov r25 =r16;				\
+	br.sptk.many kvm_vps_sync_read;		\
+};						\
+	mov b0 = r17;				\
+	mov r24 = r18;				\
+	mov r25 = r19;				\
+	mov r31 = r20
+
+ENTRY(kvm_vps_entry)
+	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r29 = [r29]
+	;;
+	add r29 = r29, r30
+	;;
+	mov b0 = r29
+	br.sptk.many b0
+END(kvm_vps_entry)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_read)
+	movl r30 = PAL_VPS_SYNC_READ
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_read)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_write)
+	movl r30 = PAL_VPS_SYNC_WRITE
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_write)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_resume_normal)
+	movl r30 = PAL_VPS_RESUME_NORMAL
+	;;
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *	r17 : isr
+ */
+GLOBAL_ENTRY(kvm_vps_resume_handler)
+	movl r30 = PAL_VPS_RESUME_HANDLER
+	;;
+	ld8 r27=[r25]
+	shr r17=r17,IA64_ISR_IR_BIT
+	;;
+	dep r27=r17,r27,63,1	// bit 63 of r27 indicates whether to enable CFLE
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_handler)
+
 //mov r1=ar3
 GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 #ifndef ACCE_MOV_FROM_AR
@@ -157,11 +252,11 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 #ifndef ACCE_RSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;
@@ -196,7 +291,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	tbit.nz p6,p0=r23,0
 	;;
 	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-(p6)	br.dptk kvm_resume_to_guest
+(p6)	br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r26=VMM_VCPU_META_RR0_OFFSET,r21
 	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
@@ -212,7 +307,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	mov rr[r28]=r27
 	;;
 	srlz.d
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_rsm)
 
 
@@ -221,11 +316,11 @@ GLOBAL_ENTRY(kvm_asm_ssm)
 #ifndef ACCE_SSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;	//r26 is imm24
@@ -271,7 +366,7 @@ kvm_asm_ssm_1:
 	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
 	;;
 	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-(p6)	br.dptk kvm_resume_to_guest
+(p6)	br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -286,7 +381,7 @@ kvm_asm_ssm_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 (p6)	br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_ssm)
 
 
@@ -295,10 +390,9 @@ GLOBAL_ENTRY(kvm_asm_mov_to_psr)
 #ifndef ACCE_MOV_TO_PSR
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	extr.u r26=r25,13,7 //r2
+	VMX_VPS_SYNC_READ
 	;;
-	ld8 r16=[r16]
+	extr.u r26=r25,13,7 //r2
 	addl r20=@gprel(asm_mov_from_reg),gp
 	;;
 	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
@@ -374,7 +468,7 @@ kvm_asm_mov_to_psr_1:
 	;;
 	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
 	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-(p6)	br.dpnt.few kvm_resume_to_guest
+(p6)	br.dpnt.few kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -389,13 +483,29 @@ kvm_asm_mov_to_psr_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 (p6)	br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_mov_to_psr)
 
 
 ENTRY(kvm_asm_dispatch_vexirq)
 //increment iip
+	mov r17 = b0
+	mov r18 = r31
+	{.mii
+	add r25=VMM_VPD_BASE_OFFSET,r21
+	nop 0x0
+	mov r24 = ip
+	;;
+	}
+	{.mmb
+	add r24 = 0x20, r24
+	ld8 r25 = [r25]
+	br.sptk.many kvm_vps_sync_write
+	}
+	mov b0 =r17
 	mov r16=cr.ipsr
+	mov r31 = r18
+	mov r19 = 37
 	;;
 	extr.u r17=r16,IA64_PSR_RI_BIT,2
 	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
@@ -435,25 +545,31 @@ GLOBAL_ENTRY(kvm_asm_thash)
 	;;
 kvm_asm_thash_back1:
 	shr.u r23=r19,61		// get RR number
-	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
+	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
 	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
 	;;
-	shladd r27=r23,3,r25	// get vcpu->arch.vrr[r23]'s addr
+	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
 	ld8 r17=[r16]		// get PTA
 	mov r26=1
 	;;
 	extr.u r29=r17,2,6	// get pta.size
-	ld8 r25=[r27]		// get vcpu->arch.vrr[r23]'s value
+	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
 	;;
-	extr.u r25=r25,2,6	// get rr.ps
+	mov b0=r24
+	//Fallback to C if pta.vf is set
+	tbit.nz p6,p0=r17, 8
+	;;
+(p6)	mov r24=EVENT_THASH
+(p6)	br.cond.dpnt.many kvm_virtualization_fault_back
+	extr.u r28=r28,2,6	// get rr.ps
 	shl r22=r26,r29		// 1UL << pta.size
 	;;
-	shr.u r23=r19,r25	// vaddr >> rr.ps
+	shr.u r23=r19,r28	// vaddr >> rr.ps
 	adds r26=3,r29		// pta.size + 3
 	shl r27=r17,3		// pta << 3
 	;;
 	shl r23=r23,3		// (vaddr >> rr.ps) << 3
 	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
 	movl r16=7<<61
 	;;
 	adds r22=-1,r22		// (1UL << pta.size) - 1
@@ -724,6 +840,29 @@ END(asm_mov_from_reg)
  * r31: pr
  * r24: b0
  */
+ENTRY(kvm_resume_to_guest_with_sync)
+	adds r19=VMM_VPD_BASE_OFFSET,r21
+	mov r16 = r31
+	mov r17 = r24
+	;;
+{.mii
+	ld8 r25 =[r19]
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 =0x20, r24
+	nop 0x0
+	br.sptk.many kvm_vps_sync_write
+}
+
+	mov r31 = r16
+	mov r24 =r17
+	;;
+	br.sptk.many kvm_resume_to_guest
+END(kvm_resume_to_guest_with_sync)
+
 ENTRY(kvm_resume_to_guest)
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;