author	Xiantao Zhang <xiantao.zhang@intel.com>	2008-09-12 18:21:22 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:13 -0400
commit	1f095610aabb9d54617901aa734d2a6093f2000c (patch)
tree	a5fbfe954fe8019968a5f5cec724172f0447dd4c /arch
parent	81aec5227eedf9035e8544d8021ca6b8fb7c357a (diff)
KVM: ia64: add support for Tukwila processors
In the Tukwila processor, VT-i has been enhanced in its implementation; this is often called VT-i2 technology. With VT-i2 support, virtualization performance should improve. This patch adds the related changes to support kvm/ia64 on Tukwila processors.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
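At a high level, the change brackets the optimized fault handlers (kvm_asm_rsm, kvm_asm_ssm, kvm_asm_mov_to_psr, kvm_asm_dispatch_vexirq) with PAL VPS synchronization: the new VMX_VPS_SYNC_READ macro calls kvm_vps_sync_read before a handler reads guest state from the VPD, and the new kvm_resume_to_guest_with_sync calls kvm_vps_sync_write before returning to the guest; kvm_asm_thash additionally falls back to the C handler when pta.vf is set. A loose C sketch of that control flow, for orientation only (the real routines are hand-written IA-64 assembly and the VPS services are reached by branch, not C call; every name below not present in the diff is hypothetical):

struct vpd {
	unsigned long vpsr;	/* guest's virtual PSR image, held in the VPD */
};

/* Hypothetical C stand-ins for the PAL VPS services used by the patch. */
static void kvm_vps_sync_read(struct vpd *vpd)  { /* firmware state -> VPD */ }
static void kvm_vps_sync_write(struct vpd *vpd) { /* VPD -> firmware state */ }
static void kvm_resume_to_guest(void)           { /* re-enter the guest    */ }

/* Shape of the kvm_asm_rsm fast path after the patch: sync in, emulate,
 * sync out.  The &= ~mask step is a simplification of the real rsm
 * emulation, which also updates region registers and metaphysical mode. */
static void kvm_asm_rsm_sketch(struct vpd *vpd, unsigned long psr_mask)
{
	kvm_vps_sync_read(vpd);		/* VMX_VPS_SYNC_READ             */
	vpd->vpsr &= ~psr_mask;		/* emulate "rsm imm24" on the VPD */
	kvm_vps_sync_write(vpd);	/* kvm_resume_to_guest_with_sync */
	kvm_resume_to_guest();
}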
Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/kvm/optvfault.S | 112
1 file changed, 91 insertions(+), 21 deletions(-)
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index f0bf0a8efa3e..634abad979b5 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -1,9 +1,12 @@
 /*
- * arch/ia64/vmx/optvfault.S
+ * arch/ia64/kvm/optvfault.S
  * optimize virtualization fault handler
  *
  * Copyright (C) 2006 Intel Co
  *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Copyright (C) 2008 Intel Co
+ *	Add the support for Tukwila processors.
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -20,6 +23,29 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
+#define VMX_VPS_SYNC_READ			\
+	add r16=VMM_VPD_BASE_OFFSET,r21;	\
+	mov r17 = b0;				\
+	mov r18 = r24;				\
+	mov r19 = r25;				\
+	mov r20 = r31;				\
+	;;					\
+{.mii;						\
+	ld8 r16 = [r16];			\
+	nop 0x0;				\
+	mov r24 = ip;				\
+	;;					\
+};						\
+{.mmb;						\
+	add r24=0x20, r24;			\
+	mov r25 =r16;				\
+	br.sptk.many kvm_vps_sync_read;		\
+};						\
+	mov b0 = r17;				\
+	mov r24 = r18;				\
+	mov r25 = r19;				\
+	mov r31 = r20
+
 ENTRY(kvm_vps_entry)
 	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
 	;;
@@ -226,11 +252,11 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 #ifndef ACCE_RSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;
@@ -265,7 +291,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	tbit.nz p6,p0=r23,0
 	;;
 	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-	(p6) br.dptk kvm_resume_to_guest
+	(p6) br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r26=VMM_VCPU_META_RR0_OFFSET,r21
 	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
@@ -281,7 +307,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	mov rr[r28]=r27
 	;;
 	srlz.d
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_rsm)
 
 
@@ -290,11 +316,11 @@ GLOBAL_ENTRY(kvm_asm_ssm)
 #ifndef ACCE_SSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;	//r26 is imm24
@@ -340,7 +366,7 @@ kvm_asm_ssm_1:
 	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
 	;;
 	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-	(p6) br.dptk kvm_resume_to_guest
+	(p6) br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -355,7 +381,7 @@ kvm_asm_ssm_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_ssm)
 
 
@@ -364,10 +390,9 @@ GLOBAL_ENTRY(kvm_asm_mov_to_psr)
 #ifndef ACCE_MOV_TO_PSR
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	extr.u r26=r25,13,7 //r2
+	VMX_VPS_SYNC_READ
 	;;
-	ld8 r16=[r16]
+	extr.u r26=r25,13,7 //r2
 	addl r20=@gprel(asm_mov_from_reg),gp
 	;;
 	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
@@ -443,7 +468,7 @@ kvm_asm_mov_to_psr_1:
 	;;
 	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
 	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-	(p6) br.dpnt.few kvm_resume_to_guest
+	(p6) br.dpnt.few kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -458,13 +483,29 @@ kvm_asm_mov_to_psr_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_mov_to_psr)
 
 
 ENTRY(kvm_asm_dispatch_vexirq)
 //increment iip
+	mov r17 = b0
+	mov r18 = r31
+{.mii
+	add r25=VMM_VPD_BASE_OFFSET,r21
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 = 0x20, r24
+	ld8 r25 = [r25]
+	br.sptk.many kvm_vps_sync_write
+}
+	mov b0 =r17
 	mov r16=cr.ipsr
+	mov r31 = r18
+	mov r19 = 37
 	;;
 	extr.u r17=r16,IA64_PSR_RI_BIT,2
 	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
@@ -504,25 +545,31 @@ GLOBAL_ENTRY(kvm_asm_thash)
 	;;
 kvm_asm_thash_back1:
 	shr.u r23=r19,61		// get RR number
-	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
+	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
 	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
 	;;
-	shladd r27=r23,3,r25	// get vcpu->arch.vrr[r23]'s addr
+	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
 	ld8 r17=[r16]		// get PTA
 	mov r26=1
 	;;
 	extr.u r29=r17,2,6	// get pta.size
-	ld8 r25=[r27]		// get vcpu->arch.vrr[r23]'s value
+	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
 	;;
-	extr.u r25=r25,2,6	// get rr.ps
+	mov b0=r24
+	//Fallback to C if pta.vf is set
+	tbit.nz p6,p0=r17, 8
+	;;
+	(p6) mov r24=EVENT_THASH
+	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
+	extr.u r28=r28,2,6	// get rr.ps
 	shl r22=r26,r29		// 1UL << pta.size
 	;;
-	shr.u r23=r19,r25	// vaddr >> rr.ps
+	shr.u r23=r19,r28	// vaddr >> rr.ps
 	adds r26=3,r29		// pta.size + 3
 	shl r27=r17,3		// pta << 3
 	;;
 	shl r23=r23,3		// (vaddr >> rr.ps) << 3
 	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
 	movl r16=7<<61
 	;;
 	adds r22=-1,r22		// (1UL << pta.size) - 1
@@ -793,6 +840,29 @@ END(asm_mov_from_reg)
  * r31: pr
  * r24: b0
  */
+ENTRY(kvm_resume_to_guest_with_sync)
+	adds r19=VMM_VPD_BASE_OFFSET,r21
+	mov r16 = r31
+	mov r17 = r24
+	;;
+{.mii
+	ld8 r25 =[r19]
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 =0x20, r24
+	nop 0x0
+	br.sptk.many kvm_vps_sync_write
+}
+
+	mov r31 = r16
+	mov r24 =r17
+	;;
+	br.sptk.many kvm_resume_to_guest
+END(kvm_resume_to_guest_with_sync)
+
 ENTRY(kvm_resume_to_guest)
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;